/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
#include <asm/processor.h>

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

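/* Return the vCPU's cached MAXPHYADDR; cpuid_query_maxphyaddr() computes it. */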
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

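/*
 * Location of a feature word in guest CPUID: the leaf (function), subleaf
 * (index) and output register in which the word's bits are defined.
 */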
struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};

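/*
 * Map each hardware-defined feature word used by KVM back to the CPUID
 * leaf/subleaf/register it is read from, indexed by the word's CPUID_* value.
 */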
static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
};

/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities.  And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned x86_leaf)
{
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5).  The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
	reverse_cpuid_check(x86_feature / 32);
	return 1 << (x86_feature & 31);
}

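/*
 * Convenience wrapper, e.g. feature_bit(XSAVES) expands to
 * __feature_bit(X86_FEATURE_XSAVES), i.e. BIT(3) within word CPUID_D_1_EAX.
 */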
#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)

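/*
 * Translate an X86_FEATURE_* value into the CPUID leaf/subleaf/register that
 * holds it, e.g. X86_FEATURE_XSAVES maps to {0xd, 1, CPUID_EAX}.
 */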
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
{
	unsigned x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return reverse_cpuid[x86_leaf];
}

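/*
 * Return a pointer to the register in the vCPU's CPUID entries that holds
 * @x86_feature, or NULL if the relevant leaf isn't exposed to the guest.
 */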
static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
	struct kvm_cpuid_entry2 *entry;
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	switch (cpuid.reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}

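/* Check whether the guest's CPUID advertises @x86_feature. */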
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
	int *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

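/* Clear @x86_feature's bit in the guest's CPUID, if the leaf is present. */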
static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
	int *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

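/* Vendor check based on guest CPUID leaf 0 EBX ("Auth" of "AuthenticAMD"). */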
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

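/*
 * Extract the guest's family/model/stepping from CPUID leaf 1 EAX; each
 * helper returns -1 if leaf 1 isn't present in the guest's CPUID.
 */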
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

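/*
 * CPUID faulting: supports_cpuid_fault() reports whether MSR_PLATFORM_INFO
 * advertises the capability to the guest, cpuid_fault_enabled() whether the
 * guest has enabled it via MSR_MISC_FEATURES_ENABLES.
 */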
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

#endif