// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include <asm/sgx.h>
#include <asm/cpuid/api.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"
#include "xen.h"

/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_caps);

bool kvm_is_configuring_cpu_caps __read_mostly;
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_configuring_cpu_caps);

struct cpuid_xstate_sizes {
	u32 eax;
	u32 ebx;
	u32 ecx;
};

static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init;

void __init kvm_init_xstate_sizes(void)
{
	u32 ign;
	int i;

	for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) {
		struct cpuid_xstate_sizes *xs = &xstate_sizes[i];

		cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign);
	}
}

u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	int i;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes) && xstate_bv; i++) {
		struct cpuid_xstate_sizes *xs = &xstate_sizes[i];
		u32 offset;

		if (!(xstate_bv & BIT_ULL(i)))
			continue;

		/* ECX[1]: 64B alignment in compacted form */
		if (compacted)
			offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
		else
			offset = xs->ebx;
		ret = max(ret, offset + xs->eax);
		xstate_bv &= ~BIT_ULL(i);
	}

	return ret;
}
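
/*
 * Illustrative sketch (hypothetical CPUID.0xD values, not kernel code): say
 * YMM state is 256 bytes at fixed offset 576 and requires 64-byte alignment
 * in the compacted format (ECX[1] = 1).  The loop above then computes:
 *
 *	base          = XSAVE_HDR_OFFSET + XSAVE_HDR_SIZE = 512 + 64 = 576
 *	compacted     : offset = ALIGN(576, 64) = 576, size = 576 + 256 = 832
 *	non-compacted : offset = EBX = 576,            size = 576 + 256 = 832
 *
 * The two layouts diverge once earlier features are absent from xstate_bv:
 * the compacted size packs only the enabled features back-to-back, while the
 * standard format always uses the fixed per-feature EBX offsets.
 */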

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry2(
	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
{
	struct kvm_cpuid_entry2 *e;
	int i;

	/*
	 * KVM has a semi-arbitrary rule that querying the guest's CPUID model
	 * with IRQs disabled is disallowed.  The CPUID model can legitimately
	 * have over one hundred entries, i.e. the lookup is slow, and IRQs are
	 * typically disabled in KVM only when KVM is in a performance critical
	 * path, e.g. the core VM-Enter/VM-Exit run loop.  Nothing will break
	 * if this rule is violated, this assertion is purely to flag potential
	 * performance issues.  If this fires, consider moving the lookup out
	 * of the hotpath, e.g. by caching information during CPUID updates.
	 */
	lockdep_assert_irqs_enabled();

	for (i = 0; i < nent; i++) {
		e = &entries[i];

		if (e->function != function)
			continue;

		/*
		 * If the index isn't significant, use the first entry with a
		 * matching function.  It's userspace's responsibility to not
		 * provide "duplicate" entries in all cases.
		 */
		if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
			return e;

		/*
		 * Similarly, use the first matching entry if KVM is doing a
		 * lookup (as opposed to emulating CPUID) for a function that's
		 * architecturally defined as not having a significant index.
		 */
		if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) {
			/*
			 * Direct lookups from KVM should not diverge from what
			 * KVM defines internally (the architectural behavior).
			 */
			WARN_ON_ONCE(cpuid_function_is_indexed(function));
			return e;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_find_cpuid_entry2);
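
/*
 * Illustrative usage sketch (hypothetical callers): an indexed leaf is looked
 * up with an explicit index, while a non-indexed leaf can be queried with
 * KVM_CPUID_INDEX_NOT_SIGNIFICANT to get the architectural behavior:
 *
 *	e = kvm_find_cpuid_entry2(entries, nent, 0x7, 0);
 *	e = kvm_find_cpuid_entry2(entries, nent, 0x80000008,
 *				  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 */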

static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	u64 xfeatures;

	/*
	 * The existing code assumes the virtual address is 48-bit or 57-bit
	 * in the canonical address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	/*
	 * Exposing dynamic xfeatures to the guest requires additional
	 * enabling in the FPU, e.g. to expand the guest XSAVE state size.
	 */
	best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0);
	if (!best)
		return 0;

	xfeatures = best->eax | ((u64)best->edx << 32);
	xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
	if (!xfeatures)
		return 0;

	return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
}

static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu);
static void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);

/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
				 int nent)
{
	struct kvm_cpuid_entry2 *orig;
	int i;

	/*
	 * Apply runtime CPUID updates to the incoming CPUID entries to avoid
	 * false positives due to mismatches on KVM-owned feature flags.
	 *
	 * Note!  @e2 and @nent track the _old_ CPUID entries!
	 */
	kvm_update_cpuid_runtime(vcpu);
	kvm_apply_cpuid_pv_features_quirk(vcpu);

	if (nent != vcpu->arch.cpuid_nent)
		return -EINVAL;

	for (i = 0; i < nent; i++) {
		orig = &vcpu->arch.cpuid_entries[i];
		if (e2[i].function != orig->function ||
		    e2[i].index != orig->index ||
		    e2[i].flags != orig->flags ||
		    e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
		    e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
			return -EINVAL;
	}

	return 0;
}

static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
							     const char *sig)
{
	struct kvm_hypervisor_cpuid cpuid = {};
	struct kvm_cpuid_entry2 *entry;
	u32 base;

	for_each_possible_cpuid_base_hypervisor(base) {
		entry = kvm_find_cpuid_entry(vcpu, base);

		if (entry) {
			u32 signature[3];

			signature[0] = entry->ebx;
			signature[1] = entry->ecx;
			signature[2] = entry->edx;

			if (!memcmp(signature, sig, sizeof(signature))) {
				cpuid.base = base;
				cpuid.limit = entry->eax;
				break;
			}
		}
	}

	return cpuid;
}

static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu)
{
	struct kvm_hypervisor_cpuid kvm_cpuid;
	struct kvm_cpuid_entry2 *best;

	kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
	if (!kvm_cpuid.base)
		return 0;

	best = kvm_find_cpuid_entry(vcpu, kvm_cpuid.base | KVM_CPUID_FEATURES);
	if (!best)
		return 0;

	if (kvm_hlt_in_guest(vcpu->kvm))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	return best->eax;
}

/*
 * Calculate guest's supported XCR0 taking into account guest CPUID data and
 * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
 */
static u64 cpuid_get_supported_xcr0(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0);
	if (!best)
		return 0;

	return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
}

static u64 cpuid_get_supported_xss(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry_index(vcpu, 0xd, 1);
	if (!best)
		return 0;

	return (best->ecx | ((u64)best->edx << 32)) & kvm_caps.supported_xss;
}

static __always_inline void kvm_update_feature_runtime(struct kvm_vcpu *vcpu,
						       struct kvm_cpuid_entry2 *entry,
						       unsigned int x86_feature,
						       bool has_feature)
{
	cpuid_entry_change(entry, x86_feature, has_feature);
	guest_cpu_cap_change(vcpu, x86_feature, has_feature);
}

static void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	vcpu->arch.cpuid_dynamic_bits_dirty = false;

	best = kvm_find_cpuid_entry(vcpu, 1);
	if (best) {
		kvm_update_feature_runtime(vcpu, best, X86_FEATURE_OSXSAVE,
					   kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));

		kvm_update_feature_runtime(vcpu, best, X86_FEATURE_APIC,
					   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);

		if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT))
			kvm_update_feature_runtime(vcpu, best, X86_FEATURE_MWAIT,
						   vcpu->arch.ia32_misc_enable_msr &
						   MSR_IA32_MISC_ENABLE_MWAIT);
	}

	best = kvm_find_cpuid_entry_index(vcpu, 7, 0);
	if (best)
		kvm_update_feature_runtime(vcpu, best, X86_FEATURE_OSPKE,
					   kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));

	best = kvm_find_cpuid_entry_index(vcpu, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = kvm_find_cpuid_entry_index(vcpu, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0 |
						 vcpu->arch.ia32_xss, true);
}

static bool kvm_cpuid_has_hyperv(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_HYPERV
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE);
	return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
#else
	return false;
#endif
}

static bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, 0);
	if (!entry)
		return false;

	return is_guest_vendor_amd(entry->ebx, entry->ecx, entry->edx) ||
	       is_guest_vendor_hygon(entry->ebx, entry->ecx, entry->edx);
}

/*
 * This isn't truly "unsafe", but except for the cpu_caps initialization code,
 * all register lookups should use __cpuid_entry_get_reg(), which provides
 * compile-time validation of the input.
 */
static u32 cpuid_get_reg_unsafe(struct kvm_cpuid_entry2 *entry, u32 reg)
{
	switch (reg) {
	case CPUID_EAX:
		return entry->eax;
	case CPUID_EBX:
		return entry->ebx;
	case CPUID_ECX:
		return entry->ecx;
	case CPUID_EDX:
		return entry->edx;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func,
			       bool include_partially_emulated);

void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *best;
	struct kvm_cpuid_entry2 *entry;
	bool allow_gbpages;
	int i;

	memset(vcpu->arch.cpu_caps, 0, sizeof(vcpu->arch.cpu_caps));
	BUILD_BUG_ON(ARRAY_SIZE(reverse_cpuid) != NR_KVM_CPU_CAPS);

	/*
	 * Reset guest capabilities to userspace's guest CPUID definition, i.e.
	 * honor userspace's definition for features that don't require KVM or
	 * hardware management/support (or that KVM simply doesn't care about).
	 */
	for (i = 0; i < NR_KVM_CPU_CAPS; i++) {
		const struct cpuid_reg cpuid = reverse_cpuid[i];
		struct kvm_cpuid_entry2 emulated;

		if (!cpuid.function)
			continue;

		entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
		if (!entry)
			continue;

		cpuid_func_emulated(&emulated, cpuid.function, true);

		/*
		 * A vCPU has a feature if it's supported by KVM and is enabled
		 * in guest CPUID.  Note, this includes features that are
		 * supported by KVM but aren't advertised to userspace!
		 */
		vcpu->arch.cpu_caps[i] = kvm_cpu_caps[i] |
					 cpuid_get_reg_unsafe(&emulated, cpuid.reg);
		vcpu->arch.cpu_caps[i] &= cpuid_get_reg_unsafe(entry, cpuid.reg);
	}

	kvm_update_cpuid_runtime(vcpu);

	/*
	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
	 * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
	 * walk for performance and complexity reasons.  Not to mention KVM
	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
	 * KVM once a TDP translation is installed.  Mimic hardware behavior so
	 * that KVM's behavior is at least consistent, i.e. doesn't randomly
	 * inject #PF.  If TDP is disabled, honor *only* guest CPUID as KVM has
	 * full control and can install smaller shadow pages if the host lacks
	 * 1GiB support.
	 */
	allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
				      guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES);
	guest_cpu_cap_change(vcpu, X86_FEATURE_GBPAGES, allow_gbpages);

	best = kvm_find_cpuid_entry(vcpu, 1);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;

		kvm_apic_set_version(vcpu);
	}

	vcpu->arch.guest_supported_xcr0 = cpuid_get_supported_xcr0(vcpu);
	vcpu->arch.guest_supported_xss = cpuid_get_supported_xss(vcpu);

	vcpu->arch.pv_cpuid.features = kvm_apply_cpuid_pv_features_quirk(vcpu);

	vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	kvm_pmu_refresh(vcpu);

#define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
	vcpu->arch.cr4_guest_rsvd_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_) |
					 __cr4_reserved_bits(guest_cpu_cap_has, vcpu);
#undef __kvm_cpu_cap_has

	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu));

	/* Invoke the vendor callback only after the above state is updated. */
	kvm_x86_call(vcpu_after_set_cpuid)(vcpu);

	/*
	 * Except for the MMU, which needs to do its thing after any vendor
	 * specific adjustments to the reserved GPA bits.
	 */
	kvm_mmu_after_set_cpuid(vcpu);

	kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}

int cpuid_query_maxguestphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008);
	if (best)
		return (best->eax >> 16) & 0xff;
not_found:
	return 0;
}

/*
 * This "raw" version returns the reserved GPA bits without any adjustments for
 * encryption technologies that usurp bits.  The raw mask should be used if and
 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
 */
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
{
	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
}
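
/*
 * Illustrative sketch: for a vCPU with MAXPHYADDR = 48, the raw reserved GPA
 * mask is rsvd_bits(48, 63), i.e. bits 63:48 are reserved and any GPA with
 * one of those bits set must be treated as illegal, unless an encryption
 * technology legitimately repurposes some of them.
 */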

static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
			 int nent)
{
	u32 vcpu_caps[NR_KVM_CPU_CAPS];
	int r;

	/*
	 * Apply pending runtime CPUID updates to the current CPUID entries to
	 * avoid false positives due to mismatches on KVM-owned feature flags.
	 */
	if (vcpu->arch.cpuid_dynamic_bits_dirty)
		kvm_update_cpuid_runtime(vcpu);

	/*
	 * Swap the existing (old) entries with the incoming (new) entries in
	 * order to massage the new entries, e.g. to account for dynamic bits
	 * that KVM controls, without losing the current guest CPUID, which KVM
	 * needs to preserve in order to unwind on failure.
	 *
	 * Similarly, save the vCPU's current cpu_caps so that the capabilities
	 * can be updated alongside the CPUID entries when performing runtime
	 * updates.  Full initialization is done if and only if the vCPU hasn't
	 * run, i.e. only if userspace is potentially changing CPUID features.
	 */
	swap(vcpu->arch.cpuid_entries, e2);
	swap(vcpu->arch.cpuid_nent, nent);

	memcpy(vcpu_caps, vcpu->arch.cpu_caps, sizeof(vcpu_caps));
	BUILD_BUG_ON(sizeof(vcpu_caps) != sizeof(vcpu->arch.cpu_caps));

	/*
	 * KVM does not correctly handle changing guest CPUID after KVM_RUN or
	 * while L2 is active, as MAXPHYADDR, GBPAGES support, AMD reserved bit
	 * behavior, etc. aren't tracked in kvm_mmu_page_role, and L2 state
	 * can't be adjusted (without breaking L2 in some way).  As a result,
	 * KVM may reuse SPs/SPTEs and/or run L2 with bad/misconfigured state.
	 *
	 * In practice, no sane VMM mucks with the core vCPU model on the fly.
	 * It would've been better to forbid any KVM_SET_CPUID{,2} calls after
	 * KVM_RUN or KVM_SET_NESTED_STATE altogether, but unfortunately some
	 * VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
	 * KVM_SET_CPUID{,2} again.  To support this legacy behavior, check
	 * whether the supplied CPUID data is equal to what's already set.
	 */
	if (!kvm_can_set_cpuid_and_feature_msrs(vcpu)) {
		r = kvm_cpuid_check_equal(vcpu, e2, nent);
		if (r)
			goto err;
		goto success;
	}

#ifdef CONFIG_KVM_HYPERV
	if (kvm_cpuid_has_hyperv(vcpu)) {
		r = kvm_hv_vcpu_init(vcpu);
		if (r)
			goto err;
	}
#endif

	r = kvm_check_cpuid(vcpu);
	if (r)
		goto err;

#ifdef CONFIG_KVM_XEN
	vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
#endif
	kvm_vcpu_after_set_cpuid(vcpu);

success:
	kvfree(e2);
	return 0;

err:
	memcpy(vcpu->arch.cpu_caps, vcpu_caps, sizeof(vcpu_caps));
	swap(vcpu->arch.cpuid_entries, e2);
	swap(vcpu->arch.cpuid_nent, nent);
	return r;
}

/*
 * Used when an old userspace passes the legacy struct kvm_cpuid_entry
 * format to a new kernel.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *e = NULL;
	struct kvm_cpuid_entry2 *e2 = NULL;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e));
		if (IS_ERR(e))
			return PTR_ERR(e);

		e2 = kvmalloc_objs(*e2, cpuid->nent, GFP_KERNEL_ACCOUNT);
		if (!e2) {
			r = -ENOMEM;
			goto out_free_cpuid;
		}
	}
	for (i = 0; i < cpuid->nent; i++) {
		e2[i].function = e[i].function;
		e2[i].eax = e[i].eax;
		e2[i].ebx = e[i].ebx;
		e2[i].ecx = e[i].ecx;
		e2[i].edx = e[i].edx;
		e2[i].index = 0;
		e2[i].flags = 0;
		e2[i].padding[0] = 0;
		e2[i].padding[1] = 0;
		e2[i].padding[2] = 0;
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

out_free_cpuid:
	kvfree(e);

	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *e2 = NULL;
	int r;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2));
		if (IS_ERR(e2))
			return PTR_ERR(e2);
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

	return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		return -E2BIG;

	if (vcpu->arch.cpuid_dynamic_bits_dirty)
		kvm_update_cpuid_runtime(vcpu);

	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	cpuid->nent = vcpu->arch.cpuid_nent;
	return 0;
}

static __always_inline u32 raw_cpuid_get(struct cpuid_reg cpuid)
{
	struct kvm_cpuid_entry2 entry;
	u32 base;

	/*
	 * KVM only supports features defined by Intel (0x0), AMD (0x80000000),
	 * and Centaur (0xc0000000).  WARN if a feature for a new vendor base
	 * is defined, as this and other code would need to be updated.
	 */
	base = cpuid.function & 0xffff0000;
	if (WARN_ON_ONCE(base && base != 0x80000000 && base != 0xc0000000))
		return 0;

	if (cpuid_eax(base) < cpuid.function)
		return 0;

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	return *__cpuid_entry_get_reg(&entry, cpuid.reg);
}

/*
 * For kernel-defined leafs, mask KVM's supported feature set with the kernel's
 * capabilities as well as raw CPUID.  For KVM-defined leafs, consult only raw
 * CPUID, as KVM is the one and only authority (in the kernel).
 */
#define kvm_cpu_cap_init(leaf, feature_initializers...)			\
do {									\
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);	\
	const u32 __maybe_unused kvm_cpu_cap_init_in_progress = leaf;	\
	const u32 *kernel_cpu_caps = boot_cpu_data.x86_capability;	\
	u32 kvm_cpu_cap_passthrough = 0;				\
	u32 kvm_cpu_cap_synthesized = 0;				\
	u32 kvm_cpu_cap_emulated = 0;					\
	u32 kvm_cpu_cap_features = 0;					\
									\
	feature_initializers						\
									\
	kvm_cpu_caps[leaf] = kvm_cpu_cap_features;			\
									\
	if (leaf < NCAPINTS)						\
		kvm_cpu_caps[leaf] &= kernel_cpu_caps[leaf];		\
									\
	kvm_cpu_caps[leaf] |= kvm_cpu_cap_passthrough;			\
	kvm_cpu_caps[leaf] &= (raw_cpuid_get(cpuid) |			\
			       kvm_cpu_cap_synthesized);		\
	kvm_cpu_caps[leaf] |= kvm_cpu_cap_emulated;			\
} while (0)
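
/*
 * Illustrative walkthrough (hypothetical bit values) of the masking order
 * implemented above for a kernel-defined leaf:
 *
 *	caps  = bits declared via F() and friends	(KVM's wish list)
 *	caps &= boot_cpu_data.x86_capability[leaf]	(kernel support)
 *	caps |= passthrough bits			(force set)
 *	caps &= raw CPUID | synthesized bits		(hardware support)
 *	caps |= emulated bits				(KVM-emulated, always on)
 *
 * E.g. a feature declared via EMULATED_F() (below) survives even if raw CPUID
 * clears it, whereas a PASSTHROUGH_F() feature is still ANDed against raw
 * hardware CPUID.
 */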

/*
 * Assert that the feature bit being declared, e.g. via F(), is in the CPUID
 * word that's being initialized.  Exempt 0x8000_0001.EDX usage of 0x1.EDX
 * features, as AMD duplicated many 0x1.EDX features into 0x8000_0001.EDX.
 */
#define KVM_VALIDATE_CPU_CAP_USAGE(name)				\
do {									\
	u32 __leaf = __feature_leaf(X86_FEATURE_##name);		\
									\
	BUILD_BUG_ON(__leaf != kvm_cpu_cap_init_in_progress);		\
} while (0)

#define F(name)							\
({									\
	KVM_VALIDATE_CPU_CAP_USAGE(name);				\
	kvm_cpu_cap_features |= feature_bit(name);			\
})

/* Scattered Flag - For features that are scattered by cpufeatures.h. */
#define SCATTERED_F(name)						\
({									\
	BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES);		\
	KVM_VALIDATE_CPU_CAP_USAGE(name);				\
	if (boot_cpu_has(X86_FEATURE_##name))				\
		F(name);						\
})

/* Features that KVM supports only on 64-bit kernels. */
#define X86_64_F(name)							\
({									\
	KVM_VALIDATE_CPU_CAP_USAGE(name);				\
	if (IS_ENABLED(CONFIG_X86_64))					\
		F(name);						\
})

/*
 * Emulated Feature - For features that KVM emulates in software irrespective
 * of host CPU/kernel support.
 */
#define EMULATED_F(name)						\
({									\
	kvm_cpu_cap_emulated |= feature_bit(name);			\
	F(name);							\
})

/*
 * Synthesized Feature - For features that are synthesized into boot_cpu_data,
 * i.e. may not be present in the raw CPUID, but can still be advertised to
 * userspace.  Primarily used for mitigation related feature flags.
 */
#define SYNTHESIZED_F(name)						\
({									\
	kvm_cpu_cap_synthesized |= feature_bit(name);			\
									\
	BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES);		\
	if (boot_cpu_has(X86_FEATURE_##name))				\
		F(name);						\
})

/*
 * Passthrough Feature - For features that KVM supports based purely on raw
 * hardware CPUID, i.e. that KVM virtualizes even if the host kernel doesn't
 * use the feature.  Simply force set the feature in KVM's capabilities, raw
 * CPUID support will be factored in by kvm_cpu_cap_init().
 */
#define PASSTHROUGH_F(name)						\
({									\
	kvm_cpu_cap_passthrough |= feature_bit(name);			\
	F(name);							\
})

/*
 * Aliased Features - For features in 0x8000_0001.EDX that are duplicates of
 * identical 0x1.EDX features, and thus are aliased from 0x1 to 0x8000_0001.
 */
#define ALIASED_1_EDX_F(name)						\
({									\
	BUILD_BUG_ON(__feature_leaf(X86_FEATURE_##name) != CPUID_1_EDX); \
	BUILD_BUG_ON(kvm_cpu_cap_init_in_progress != CPUID_8000_0001_EDX); \
	kvm_cpu_cap_features |= feature_bit(name);			\
})

/*
 * Vendor Features - For features that KVM supports, but that are set only
 * later because they require additional vendor enabling.
 */
#define VENDOR_F(name)							\
({									\
	KVM_VALIDATE_CPU_CAP_USAGE(name);				\
})

/*
 * Runtime Features - For features that KVM dynamically sets/clears at runtime,
 * e.g. when CR4 changes, but which are never advertised to userspace.
 */
#define RUNTIME_F(name)							\
({									\
	KVM_VALIDATE_CPU_CAP_USAGE(name);				\
})

/*
 * Undefine the MSR bit macro to avoid token concatenation issues when
 * processing X86_FEATURE_SPEC_CTRL_SSBD.
 */
#undef SPEC_CTRL_SSBD

/* DS is defined by ptrace-abi.h on 32-bit builds. */
#undef DS

void kvm_initialize_cpu_caps(void)
{
	memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

	WARN_ON_ONCE(kvm_is_configuring_cpu_caps);
	kvm_is_configuring_cpu_caps = true;

	BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
		     sizeof(boot_cpu_data.x86_capability));

	kvm_cpu_cap_init(CPUID_1_ECX,
		F(XMM3),
		F(PCLMULQDQ),
		VENDOR_F(DTES64),
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!  MWAIT is also technically a
		 * runtime flag thanks to IA32_MISC_ENABLES; mark it as such so
		 * that KVM is aware that it's a known, unadvertised flag.
		 */
		RUNTIME_F(MWAIT),
		/* DS-CPL */
		VENDOR_F(VMX),
		/* SMX, EST */
		/* TM2 */
		F(SSSE3),
		/* CNXT-ID */
		/* Reserved */
		F(FMA),
		F(CX16),
		/* xTPR Update */
		F(PDCM),
		F(PCID),
		/* Reserved, DCA */
		F(XMM4_1),
		F(XMM4_2),
		EMULATED_F(X2APIC),
		F(MOVBE),
		F(POPCNT),
		EMULATED_F(TSC_DEADLINE_TIMER),
		F(AES),
		F(XSAVE),
		RUNTIME_F(OSXSAVE),
		F(AVX),
		F(F16C),
		F(RDRAND),
		EMULATED_F(HYPERVISOR),
	);

	kvm_cpu_cap_init(CPUID_1_EDX,
		F(FPU),
		F(VME),
		F(DE),
		F(PSE),
		F(TSC),
		F(MSR),
		F(PAE),
		F(MCE),
		F(CX8),
		F(APIC),
		/* Reserved */
		F(SEP),
		F(MTRR),
		F(PGE),
		F(MCA),
		F(CMOV),
		F(PAT),
		F(PSE36),
		/* PSN */
		F(CLFLUSH),
		/* Reserved */
		VENDOR_F(DS),
		/* ACPI */
		F(MMX),
		F(FXSR),
		F(XMM),
		F(XMM2),
		F(SELFSNOOP),
		/* HTT, TM, Reserved, PBE */
	);

	kvm_cpu_cap_init(CPUID_7_0_EBX,
		F(FSGSBASE),
		EMULATED_F(TSC_ADJUST),
		F(SGX),
		F(BMI1),
		F(HLE),
		F(AVX2),
		F(FDP_EXCPTN_ONLY),
		F(SMEP),
		F(BMI2),
		F(ERMS),
		F(INVPCID),
		F(RTM),
		F(ZERO_FCS_FDS),
		VENDOR_F(MPX),
		F(AVX512F),
		F(AVX512DQ),
		F(RDSEED),
		F(ADX),
		F(SMAP),
		F(AVX512IFMA),
		F(CLFLUSHOPT),
		F(CLWB),
		VENDOR_F(INTEL_PT),
		F(AVX512PF),
		F(AVX512ER),
		F(AVX512CD),
		F(SHA_NI),
		F(AVX512BW),
		F(AVX512VL),
	);

	kvm_cpu_cap_init(CPUID_7_ECX,
		F(AVX512VBMI),
		PASSTHROUGH_F(LA57),
		F(PKU),
		RUNTIME_F(OSPKE),
		F(RDPID),
		F(AVX512_VPOPCNTDQ),
		F(UMIP),
		F(AVX512_VBMI2),
		F(GFNI),
		F(VAES),
		F(VPCLMULQDQ),
		F(AVX512_VNNI),
		F(AVX512_BITALG),
		F(CLDEMOTE),
		F(MOVDIRI),
		F(MOVDIR64B),
		VENDOR_F(WAITPKG),
		F(SGX_LC),
		F(BUS_LOCK_DETECT),
		X86_64_F(SHSTK),
	);

	/*
	 * PKU is not yet implemented for shadow paging and requires OSPKE
	 * to be set on the host.  Clear it if that is not the case.
	 */
	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
		kvm_cpu_cap_clear(X86_FEATURE_PKU);

	/*
	 * Shadow Stacks aren't implemented in the Shadow MMU.  Shadow Stack
	 * accesses require "magic" Writable=0,Dirty=1 protection, which KVM
	 * doesn't know how to emulate or map.
	 */
	if (!tdp_enabled)
		kvm_cpu_cap_clear(X86_FEATURE_SHSTK);

	kvm_cpu_cap_init(CPUID_7_EDX,
		F(AVX512_4VNNIW),
		F(AVX512_4FMAPS),
		F(SPEC_CTRL),
		F(SPEC_CTRL_SSBD),
		EMULATED_F(ARCH_CAPABILITIES),
		F(INTEL_STIBP),
		F(MD_CLEAR),
		F(AVX512_VP2INTERSECT),
		F(FSRM),
		F(SERIALIZE),
		F(TSXLDTRK),
		F(AVX512_FP16),
		F(AMX_TILE),
		F(AMX_INT8),
		F(AMX_BF16),
		F(FLUSH_L1D),
		F(IBT),
	);

	/*
	 * Disable support for IBT and SHSTK if KVM is configured to emulate
	 * accesses to reserved GPAs, as KVM's emulator doesn't support IBT or
	 * SHSTK, nor does KVM handle Shadow Stack #PFs (see above).
	 */
	if (allow_smaller_maxphyaddr) {
		kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
		kvm_cpu_cap_clear(X86_FEATURE_IBT);
	}

	if (boot_cpu_has(X86_FEATURE_AMD_IBPB_RET) &&
	    boot_cpu_has(X86_FEATURE_AMD_IBPB) &&
	    boot_cpu_has(X86_FEATURE_AMD_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);

	kvm_cpu_cap_init(CPUID_7_1_EAX,
		F(SHA512),
		F(SM3),
		F(SM4),
		F(AVX_VNNI),
		F(AVX512_BF16),
		F(CMPCCXADD),
		F(FZRM),
		F(FSRS),
		F(FSRC),
		F(WRMSRNS),
		X86_64_F(LKGS),
		F(AMX_FP16),
		F(AVX_IFMA),
		F(LAM),
		F(MOVRS),
	);

	kvm_cpu_cap_init(CPUID_7_1_ECX,
		SCATTERED_F(MSR_IMM),
	);

	kvm_cpu_cap_init(CPUID_7_1_EDX,
		F(AVX_VNNI_INT8),
		F(AVX_NE_CONVERT),
		F(AMX_COMPLEX),
		F(AVX_VNNI_INT16),
		F(PREFETCHITI),
		F(AVX10),
	);

	kvm_cpu_cap_init(CPUID_7_2_EDX,
		F(INTEL_PSFD),
		F(IPRED_CTRL),
		F(RRSBA_CTRL),
		F(DDPD_U),
		F(BHI_CTRL),
		F(MCDT_NO),
	);

	kvm_cpu_cap_init(CPUID_D_1_EAX,
		F(XSAVEOPT),
		F(XSAVEC),
		F(XGETBV1),
		F(XSAVES),
		X86_64_F(XFD),
	);

	kvm_cpu_cap_init(CPUID_12_EAX,
		SCATTERED_F(SGX1),
		SCATTERED_F(SGX2),
		SCATTERED_F(SGX_EDECCSSA),
	);

	kvm_cpu_cap_init(CPUID_1E_1_EAX,
		F(AMX_INT8_ALIAS),
		F(AMX_BF16_ALIAS),
		F(AMX_COMPLEX_ALIAS),
		F(AMX_FP16_ALIAS),
		F(AMX_FP8),
		F(AMX_TF32),
		F(AMX_AVX512),
		F(AMX_MOVRS),
	);

	kvm_cpu_cap_init(CPUID_24_0_EBX,
		F(AVX10_128),
		F(AVX10_256),
		F(AVX10_512),
	);

	kvm_cpu_cap_init(CPUID_24_1_ECX,
		F(AVX10_VNNI_INT),
	);

	kvm_cpu_cap_init(CPUID_8000_0001_ECX,
		F(LAHF_LM),
		F(CMP_LEGACY),
		VENDOR_F(SVM),
		/* ExtApicSpace */
		F(CR8_LEGACY),
		F(ABM),
		F(SSE4A),
		F(MISALIGNSSE),
		F(3DNOWPREFETCH),
		F(OSVW),
		/* IBS */
		F(XOP),
		/* SKINIT, WDT, LWP */
		F(FMA4),
		F(TBM),
		F(TOPOEXT),
		VENDOR_F(PERFCTR_CORE),
	);

	kvm_cpu_cap_init(CPUID_8000_0001_EDX,
		ALIASED_1_EDX_F(FPU),
		ALIASED_1_EDX_F(VME),
		ALIASED_1_EDX_F(DE),
		ALIASED_1_EDX_F(PSE),
		ALIASED_1_EDX_F(TSC),
		ALIASED_1_EDX_F(MSR),
		ALIASED_1_EDX_F(PAE),
		ALIASED_1_EDX_F(MCE),
		ALIASED_1_EDX_F(CX8),
		ALIASED_1_EDX_F(APIC),
		/* Reserved */
		F(SYSCALL),
		ALIASED_1_EDX_F(MTRR),
		ALIASED_1_EDX_F(PGE),
		ALIASED_1_EDX_F(MCA),
		ALIASED_1_EDX_F(CMOV),
		ALIASED_1_EDX_F(PAT),
		ALIASED_1_EDX_F(PSE36),
		/* Reserved */
		F(NX),
		/* Reserved */
		F(MMXEXT),
		ALIASED_1_EDX_F(MMX),
		ALIASED_1_EDX_F(FXSR),
		F(FXSR_OPT),
		X86_64_F(GBPAGES),
		F(RDTSCP),
		/* Reserved */
		X86_64_F(LM),
		F(3DNOWEXT),
		F(3DNOW),
	);

	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

	kvm_cpu_cap_init(CPUID_8000_0007_EDX,
		SCATTERED_F(CONSTANT_TSC),
	);

	kvm_cpu_cap_init(CPUID_8000_0008_EBX,
		F(CLZERO),
		F(XSAVEERPTR),
		F(WBNOINVD),
		F(AMD_IBPB),
		F(AMD_IBRS),
		F(AMD_SSBD),
		F(VIRT_SSBD),
		F(AMD_SSB_NO),
		F(AMD_STIBP),
		F(AMD_STIBP_ALWAYS_ON),
		F(AMD_IBRS_SAME_MODE),
		PASSTHROUGH_F(EFER_LMSLE_MBZ),
		F(AMD_PSFD),
		F(AMD_IBPB_RET),
	);

	/*
	 * AMD has separate bits for each SPEC_CTRL bit.
	 * arch/x86/kernel/cpu/bugs.c is kind enough to
	 * record that in cpufeatures so use them.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
		    !boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB))
			kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB_RET);
	}
	if (boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
	/*
	 * The preference is to use the SPEC_CTRL MSR instead of the
	 * VIRT_SPEC MSR.
	 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

	/* All SVM features require additional vendor module enabling. */
	kvm_cpu_cap_init(CPUID_8000_000A_EDX,
		VENDOR_F(NPT),
		VENDOR_F(VMCBCLEAN),
		VENDOR_F(FLUSHBYASID),
		VENDOR_F(NRIPS),
		VENDOR_F(TSCRATEMSR),
		VENDOR_F(V_VMSAVE_VMLOAD),
		VENDOR_F(LBRV),
		VENDOR_F(PAUSEFILTER),
		VENDOR_F(PFTHRESHOLD),
		VENDOR_F(VGIF),
		VENDOR_F(VNMI),
		VENDOR_F(SVME_ADDR_CHK),
	);

	kvm_cpu_cap_init(CPUID_8000_001F_EAX,
		VENDOR_F(SME),
		VENDOR_F(SEV),
		/* VM_PAGE_FLUSH */
		VENDOR_F(SEV_ES),
		F(SME_COHERENT),
	);

	kvm_cpu_cap_init(CPUID_8000_0021_EAX,
		F(NO_NESTED_DATA_BP),
		F(WRMSR_XX_BASE_NS),
		/*
		 * Synthesize "LFENCE is serializing" into the AMD-defined entry
		 * in KVM's supported CPUID, i.e. if the feature is reported as
		 * supported by the kernel.  LFENCE_RDTSC was a Linux-defined
		 * synthetic feature long before AMD joined the bandwagon, e.g.
		 * LFENCE is serializing on most CPUs that support SSE2.  On
		 * CPUs that don't support AMD's leaf, ANDing with the raw host
		 * CPUID will drop the flags, and reporting support in AMD's
		 * leaf can make it easier for userspace to detect the feature.
		 */
		SYNTHESIZED_F(LFENCE_RDTSC),
		/* SmmPgCfgLock */
		/* 4: Resv */
		SYNTHESIZED_F(VERW_CLEAR),
		F(NULL_SEL_CLR_BASE),
		/* UpperAddressIgnore */
		F(AUTOIBRS),
		F(PREFETCHI),
		EMULATED_F(NO_SMM_CTL_MSR),
		/* PrefetchCtlMsr */
		/* GpOnUserCpuid */
		/* EPSF */
		F(ERAPS),
		SYNTHESIZED_F(SBPB),
		SYNTHESIZED_F(IBPB_BRTYPE),
		SYNTHESIZED_F(SRSO_NO),
		F(SRSO_USER_KERNEL_NO),
	);

	kvm_cpu_cap_init(CPUID_8000_0021_ECX,
		SYNTHESIZED_F(TSA_SQ_NO),
		SYNTHESIZED_F(TSA_L1_NO),
	);

	kvm_cpu_cap_init(CPUID_8000_0022_EAX,
		F(PERFMON_V2),
	);

	if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
		kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);

	kvm_cpu_cap_init(CPUID_C000_0001_EDX,
		F(XSTORE),
		F(XSTORE_EN),
		F(XCRYPT),
		F(XCRYPT_EN),
		F(ACE2),
		F(ACE2_EN),
		F(PHE),
		F(PHE_EN),
		F(PMM),
		F(PMM_EN),
	);

	/*
	 * Hide RDTSCP and RDPID if either feature is reported as supported but
	 * probing MSR_TSC_AUX failed.  This is purely a sanity check and
	 * should never happen, but the guest will likely crash if RDTSCP or
	 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
	 * the past.  For example, the sanity check may fire if this instance
	 * of KVM is running as L1 on top of an older, broken KVM.
	 */
	if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
		     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
		    !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
	}
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_initialize_cpu_caps);

#undef F
#undef SCATTERED_F
#undef X86_64_F
#undef EMULATED_F
#undef SYNTHESIZED_F
#undef PASSTHROUGH_F
#undef ALIASED_1_EDX_F
#undef VENDOR_F
#undef RUNTIME_F

struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
	int maxnent;
	int nent;
};

static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
{
	if (array->nent >= array->maxnent)
		return NULL;

	return &array->entries[array->nent++];
}

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);

	if (!entry)
		return NULL;

	memset(entry, 0, sizeof(*entry));
	entry->function = function;
	entry->index = index;
	switch (function & 0xC0000000) {
	case 0x40000000:
		/* Hypervisor leaves are always synthesized by __do_cpuid_func. */
		return entry;

	case 0x80000000:
		/*
		 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which
		 * would result in out-of-bounds calls to do_host_cpuid.
		 */
		{
			static int max_cpuid_80000000;

			if (!READ_ONCE(max_cpuid_80000000))
				WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
			if (function > READ_ONCE(max_cpuid_80000000))
				return entry;
		}
		break;

	default:
		break;
	}

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	if (cpuid_function_is_indexed(function))
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;

	return entry;
}

static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func,
			       bool include_partially_emulated)
{
	memset(entry, 0, sizeof(*entry));

	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		return 1;
	case 1:
		entry->ecx = feature_bit(MOVBE);
		/*
		 * KVM allows userspace to enumerate MONITOR+MWAIT support to
		 * the guest, but the MWAIT feature flag is never advertised
		 * to userspace because MONITOR+MWAIT aren't virtualized by
		 * hardware, can't be faithfully emulated in software (KVM
		 * emulates them as NOPs), and allowing the guest to execute
		 * them natively requires enabling a per-VM capability.
		 */
		if (include_partially_emulated)
			entry->ecx |= feature_bit(MWAIT);
		return 1;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
			entry->ecx = feature_bit(RDPID);
		return 1;
	default:
		return 0;
	}
}

static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
	if (array->nent >= array->maxnent)
		return -E2BIG;

	array->nent += cpuid_func_emulated(&array->entries[array->nent], func, false);
	return 0;
}

static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
	struct kvm_cpuid_entry2 *entry;
	int r, i, max_idx;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	entry = do_host_cpuid(array, function, 0);
	if (!entry)
		goto out;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x24U);
		break;
	case 1:
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
		break;
	case 2:
		/*
		 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
		 * CPUID(function=2, index=0) may return different results each
		 * time, with the least-significant byte in EAX enumerating the
		 * number of times software should do CPUID(2, 0).
		 *
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
		 * idiotic.  Intel's SDM states that EAX & 0xff "will always
		 * return 01H. Software should ignore this value and not
		 * interpret it as an informational descriptor", while AMD's
		 * APM states that CPUID(2) is reserved.
		 *
		 * WARN if a frankenstein CPU that supports virtualization and
		 * a stateful CPUID.0x2 is encountered.
		 */
		WARN_ON_ONCE((entry->eax & 0xff) > 1);
		break;
	/* functions 4 and 0x8000001d have an additional index. */
	case 4:
	case 0x8000001d:
		/*
		 * Read entries until the cache type in the previous entry is
		 * zero, i.e. indicates an invalid entry.
		 */
		for (i = 1; entry->eax & 0x1f; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has an additional index. */
	case 7:
		max_idx = entry->eax = min(entry->eax, 2u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports up to 0x7.2, capped above via min(). */
		if (max_idx >= 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_1_EAX);
			cpuid_entry_override(entry, CPUID_7_1_ECX);
			cpuid_entry_override(entry, CPUID_7_1_EDX);
			entry->ebx = 0;
		}
		if (max_idx >= 2) {
			entry = do_host_cpuid(array, function, 2);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_2_EDX);
			entry->ecx = 0;
			entry->ebx = 0;
			entry->eax = 0;
		}
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		union cpuid10_eax eax = { };
		union cpuid10_edx edx = { };

		if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		eax.split.version_id = kvm_pmu_cap.version;
		eax.split.num_counters = kvm_pmu_cap.num_counters_gp;
		eax.split.bit_width = kvm_pmu_cap.bit_width_gp;
		eax.split.mask_length = kvm_pmu_cap.events_mask_len;
		edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
		edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;

		if (kvm_pmu_cap.version)
			edx.split.anythread_deprecated = 1;

		entry->eax = eax.full;
		entry->ebx = kvm_pmu_cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	case 0x1f:
	case 0xb:
		/*
		 * No topology; a valid topology is indicated by the presence
		 * of subleaf 1.
		 */
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0xd: {
		u64 permitted_xcr0 = kvm_get_filtered_xcr0();
		u64 permitted_xss = kvm_caps.supported_xss;

		entry->eax &= permitted_xcr0;
		entry->ebx = xstate_required_size(permitted_xcr0, false);
		entry->ecx = entry->ebx;
		entry->edx &= permitted_xcr0 >> 32;
		if (!permitted_xcr0)
			break;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (feature_bit(XSAVES) | feature_bit(XSAVEC)))
			entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
							  true);
		else {
			WARN_ON_ONCE(permitted_xss != 0);
			entry->ebx = 0;
		}
		entry->ecx &= permitted_xss;
		entry->edx &= permitted_xss >> 32;

		for (i = 2; i < 64; ++i) {
			bool s_state;

			if (permitted_xcr0 & BIT_ULL(i))
				s_state = false;
			else if (permitted_xss & BIT_ULL(i))
				s_state = true;
			else
				continue;

			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;

			/*
			 * The supported check above should have filtered out
			 * invalid sub-leafs.  Only valid sub-leafs should
			 * reach this point, and they should have a non-zero
			 * save state size.  Furthermore, check whether the
			 * processor agrees with permitted_xcr0/permitted_xss
			 * on whether this is an XCR0- or IA32_XSS-managed area.
			 */
			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
				--array->nent;
				continue;
			}

			if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
				entry->ecx &= ~BIT_ULL(2);
			entry->edx = 0;
		}
		break;
	}
	case 0x12:
		/* Intel SGX */
		if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		/*
		 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
		 * and max enclave sizes.  The SGX sub-features and MISCSELECT
		 * are restricted by kernel and KVM capabilities (like most
		 * feature flags), while enclave size is unrestricted.
		 */
		cpuid_entry_override(entry, CPUID_12_EAX);
		entry->ebx &= SGX_MISC_EXINFO;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		/*
		 * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
		 * feature flags.  Advertise all supported flags, including
		 * privileged attributes that require explicit opt-in from
		 * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
		 * expected to derive it from supported XCR0.
		 */
		entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
		entry->ebx &= 0;
		break;
	/* Intel PT */
	case 0x14:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	/* Intel AMX TILE */
	case 0x1d:
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case 0x1e: /* TMUL information */
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		max_idx = entry->eax = min(entry->eax, 1u);

		/* KVM only supports up to 0x1e.0x1, capped above via min(). */
		if (max_idx >= 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_1E_1_EAX);
			entry->ebx = 0;
			entry->ecx = 0;
			entry->edx = 0;
		}
		break;
	case 0x24: {
		u8 avx10_version;

		if (!kvm_cpu_cap_has(X86_FEATURE_AVX10)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		max_idx = entry->eax = min(entry->eax, 1u);

		/*
		 * The AVX10 version is encoded in EBX[7:0].  Note, the version
		 * is guaranteed to be >=1 if AVX10 is supported.  Note #2, the
		 * version needs to be captured before overriding EBX features!
		 */
		avx10_version = min_t(u8, entry->ebx & 0xff, 2);
		cpuid_entry_override(entry, CPUID_24_0_EBX);
		entry->ebx |= avx10_version;

		entry->ecx = 0;
		entry->edx = 0;

		/* KVM only supports up to 0x24.0x1, capped above via min(). */
		if (max_idx >= 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_24_1_ECX);
			entry->eax = 0;
			entry->ebx = 0;
			entry->edx = 0;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		const u32 *sigptr = (const u32 *)KVM_SIGNATURE;

		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
			     (1 << KVM_FEATURE_ASYNC_PF_INT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x80000022);
		/*
		 * Serializing LFENCE is reported in a multitude of ways, and
		 * NullSegClearsBase is not reported in CPUID on Zen2; help
		 * userspace by providing the CPUID leaf ourselves.
		 *
		 * However, only do it if the host has CPUID leaf 0x8000001d.
		 * QEMU thinks that it can query the host blindly for that
		 * CPUID leaf if KVM reports that it supports 0x8000001d or
		 * above.  The processor merrily returns values from the
		 * highest Intel leaf which QEMU tries to use as the guest's
		 * 0x8000001d.  Even worse, this can result in an infinite
		 * loop if said highest leaf has no subleaves indexed by ECX.
		 */
		if (entry->eax >= 0x8000001d &&
		    (static_cpu_has(X86_FEATURE_LFENCE_RDTSC) ||
		     !static_cpu_has_bug(X86_BUG_NULL_SEG)))
			entry->eax = max(entry->eax, 0x80000021);
		break;
	case 0x80000001:
		entry->ebx &= ~GENMASK(27, 16);
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
		break;
	case 0x80000005:
		/* Pass host L1 cache and TLB info. */
		break;
	case 0x80000006:
		/* Drop reserved bits, pass host L2 cache and TLB info. */
		entry->edx &= ~GENMASK(17, 16);
		break;
	case 0x80000007: /* Advanced power management */
		cpuid_entry_override(entry, CPUID_8000_0007_EDX);

		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		/*
		 * GuestPhysAddrSize (EAX[23:16]) is intended for software
		 * use.
		 *
		 * KVM's ABI is to report the effective MAXPHYADDR for the
		 * guest in PhysAddrSize (phys_as), and the maximum
		 * *addressable* GPA in GuestPhysAddrSize (g_phys_as).
		 *
		 * GuestPhysAddrSize is valid if and only if TDP is enabled,
		 * in which case the max GPA that can be addressed by KVM may
		 * be less than the max GPA that can be legally generated by
		 * the guest, e.g. if MAXPHYADDR>48 but the CPU doesn't
		 * support 5-level TDP.
		 */
		unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned int phys_as, g_phys_as;

		/*
		 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
		 * the guest operates in the same PA space as the host, i.e.
		 * reductions in MAXPHYADDR for memory encryption affect shadow
		 * paging, too.
		 *
		 * If TDP is enabled, use the raw bare metal MAXPHYADDR as
		 * reductions to the HPAs do not affect GPAs.  The max
		 * addressable GPA is the same as the max effective GPA, except
		 * that it's capped at 48 bits if 5-level TDP isn't supported
		 * (hardware processes bits 51:48 only when walking the fifth
		 * level page table).
		 */
		if (!tdp_enabled) {
			phys_as = boot_cpu_data.x86_phys_bits;
			g_phys_as = 0;
		} else {
			phys_as = entry->eax & 0xff;
			g_phys_as = phys_as;
			if (kvm_mmu_get_max_tdp_level() < 5)
				g_phys_as = min(g_phys_as, 48U);
		}

		entry->eax = phys_as | (virt_as << 8) | (g_phys_as << 16);
		entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
		break;
	}
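	/*
	 * Illustrative example (hypothetical CPU): the host reports
	 * PhysAddrSize = 52 but only supports 4-level TDP.  With TDP enabled,
	 * the code above yields phys_as = 52 and g_phys_as = min(52, 48) = 48,
	 * i.e. the guest may legally generate 52-bit GPAs even though KVM can
	 * only map GPAs with bits 51:48 clear.  With TDP disabled, phys_as
	 * comes from the (possibly encryption-adjusted) host value and
	 * g_phys_as is reported as 0.
	 */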
	case 0x8000000A:
		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		entry->eax = 1; /* SVM revision 1 */
		/*
		 * Let's support 8 ASIDs in case we add proper ASID emulation
		 * to nested SVM.
		 */
		entry->ebx = 8;
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
		break;
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		entry->eax &= GENMASK(2, 0);
		entry->ebx = entry->ecx = entry->edx = 0;
		break;
	case 0x8000001e:
		/* Do not return host topology information. */
		entry->eax = entry->ebx = entry->ecx = 0;
		entry->edx = 0; /* reserved */
		break;
	case 0x8000001F:
		if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		} else {
			cpuid_entry_override(entry, CPUID_8000_001F_EAX);
			/* Clear NumVMPL since KVM does not support VMPL. */
			entry->ebx &= ~GENMASK(31, 12);
			/*
			 * Enumerate '0' for "PA bits reduction", the adjusted
			 * MAXPHYADDR is enumerated directly (see 0x80000008).
			 */
			entry->ebx &= ~GENMASK(11, 6);
		}
		break;
	case 0x80000020:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	case 0x80000021:
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0021_EAX);

		if (kvm_cpu_cap_has(X86_FEATURE_ERAPS))
			entry->ebx &= GENMASK(23, 16);
		else
			entry->ebx = 0;

		cpuid_entry_override(entry, CPUID_8000_0021_ECX);
		break;
	/* AMD Extended Performance Monitoring and Debug */
	case 0x80000022: {
		union cpuid_0x80000022_ebx ebx = { };

		entry->ecx = entry->edx = 0;
		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
			entry->eax = entry->ebx = 0;
			break;
		}

		cpuid_entry_override(entry, CPUID_8000_0022_EAX);

		ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
		entry->ebx = ebx.full;
		break;
	}
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	r = 0;

out:
	put_cpu();

	return r;
}
1912
static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			 unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(array, func);

	return __do_cpuid_func(array, func);
}

#define CENTAUR_CPUID_SIGNATURE 0xC0000000

static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			  unsigned int type)
{
	u32 limit;
	int r;

	if (func == CENTAUR_CPUID_SIGNATURE &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
		return 0;

	r = do_cpuid_func(array, func, type);
	if (r)
		return r;

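	/*
	 * The just-added entry for the base leaf enumerates the class's max
	 * supported leaf in EAX; enumerate all leaves up to and including it.
	 */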
	limit = array->entries[array->nent - 1].eax;
	for (func = func + 1; func <= limit; ++func) {
		r = do_cpuid_func(array, func, type);
		if (r)
			break;
	}

	return r;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we have
	 * to settle for checking only the emulated side. /me sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	static const u32 funcs[] = {
		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
	};

	struct kvm_cpuid_array array = {
		.nent = 0,
	};
	int r, i;

	if (cpuid->nent < 1)
		return -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	array.entries = kvzalloc_objs(struct kvm_cpuid_entry2, cpuid->nent);
	if (!array.entries)
		return -ENOMEM;

	array.maxnent = cpuid->nent;

	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
		r = get_cpuid_func(&array, funcs[i], type);
		if (r)
			goto out_free;
	}
	cpuid->nent = array.nent;

	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
		r = -EFAULT;

out_free:
	kvfree(array.entries);
	return r;
}
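
/*
 * Illustrative only, not kernel code: userspace typically retrieves the
 * supported CPUID model via the KVM_GET_SUPPORTED_CPUID system ioctl on the
 * /dev/kvm fd, sizing the buffer for up to KVM_MAX_CPUID_ENTRIES entries,
 * roughly as follows (error handling elided):
 *
 *	struct kvm_cpuid2 *cpuid;
 *
 *	cpuid = calloc(1, sizeof(*cpuid) + KVM_MAX_CPUID_ENTRIES *
 *					   sizeof(struct kvm_cpuid_entry2));
 *	cpuid->nent = KVM_MAX_CPUID_ENTRIES;
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
 *	// cpuid->nent is now the number of entries KVM filled in.
 */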

/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range. Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive. A primary
 * class exists if a guest CPUID entry for its <base> leaf exists. For a given
 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range. E.g. if QEMU
 * is advertising support for both Hyper-V and KVM, the resulting Hypervisor
 * CPUID sub-classes are:
 *
 *  - HyperV: 0x40000000 - 0x400000ff
 *  - KVM:    0x40000100 - 0x400001ff
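 *
 * E.g. with the above layout, a guest query of leaf 0x40000102 is bounds
 * checked against CPUID.0x40000100.EAX (the KVM sub-class), not against
 * CPUID.0x40000000.EAX.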
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
	struct kvm_cpuid_entry2 *basic, *class;
	u32 function = *fn_ptr;

	basic = kvm_find_cpuid_entry(vcpu, 0);
	if (!basic)
		return NULL;

	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
		return NULL;

	if (function >= 0x40000000 && function <= 0x4fffffff)
		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
	else if (function >= 0xc0000000)
		class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
	else
		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);

	if (class && function <= class->eax)
		return NULL;

	/*
	 * Leaf specific adjustments are also applied when redirecting to the
	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
	 * entry for CPUID.0xb.index (see below), then the output value for EDX
	 * needs to be pulled from CPUID.0xb.1.
	 */
	*fn_ptr = basic->eax;

	/*
	 * The class does not exist or the requested function is out of range;
	 * the effective CPUID entry is the max basic leaf. Note, the index of
	 * the original requested leaf is observed!
	 */
	return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
}

bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only)
{
	u32 orig_function = *eax, function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	bool exact, used_max_basic = false;

	if (vcpu->arch.cpuid_dynamic_bits_dirty)
		kvm_update_cpuid_runtime(vcpu);

	entry = kvm_find_cpuid_entry_index(vcpu, function, index);
	exact = !!entry;

	if (!entry && !exact_only) {
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
		used_max_basic = !!entry;
	}

	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
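		/*
		 * Hide RTM and HLE if the guest has requested that TSX be
		 * masked from CPUID via MSR_IA32_TSX_CTRL's CPUID_CLEAR bit.
		 */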
		if (function == 7 && index == 0) {
			u64 data;

			if ((*ebx & (feature_bit(RTM) | feature_bit(HLE))) &&
			    !kvm_msr_read(vcpu, MSR_IA32_TSX_CTRL, &data) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(feature_bit(RTM) | feature_bit(HLE));
		} else if (function == 0x80000007) {
			if (kvm_hv_invtsc_suppressed(vcpu))
				*edx &= ~feature_bit(CONSTANT_TSC);
		} else if (IS_ENABLED(CONFIG_KVM_XEN) &&
			   kvm_xen_is_tsc_leaf(vcpu, function)) {
			/*
			 * Update guest TSC frequency information if necessary.
			 * Ignore failures, there is no sane value that can be
			 * provided if KVM can't get the TSC frequency.
			 */
			if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
				kvm_guest_time_update(vcpu);

			if (index == 1) {
				*ecx = vcpu->arch.pvclock_tsc_mul;
				*edx = vcpu->arch.pvclock_tsc_shift;
			} else if (index == 2) {
				*eax = vcpu->arch.hw_tsc_khz;
			}
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves. Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists. EDX can be copied from any existing index.
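		 *
		 * E.g. querying CPUID.0xb with an out-of-range subleaf in ECX
		 * still yields ECX[7:0] = requested subleaf and EDX = x2APIC
		 * ID, matching bare metal behavior.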
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
	return exact;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpuid);

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

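	/*
	 * If the guest has enabled CPUID faulting, CPUID at CPL > 0 must
	 * fault; kvm_require_cpl() injects the #GP when the check fails.
	 */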
	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_cpuid);
