xref: /linux/arch/x86/kvm/cpuid.c (revision 8b6d678fede700db6466d73f11fcbad496fa515e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  * cpuid support routines
5  *
6  * derived from arch/x86/kvm/x86.c
7  *
8  * Copyright 2011 Red Hat, Inc. and/or its affiliates.
9  * Copyright IBM Corporation, 2008
10  */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/kvm_host.h>
14 #include "linux/lockdep.h"
15 #include <linux/export.h>
16 #include <linux/vmalloc.h>
17 #include <linux/uaccess.h>
18 #include <linux/sched/stat.h>
19 
20 #include <asm/processor.h>
21 #include <asm/user.h>
22 #include <asm/fpu/xstate.h>
23 #include <asm/sgx.h>
24 #include <asm/cpuid.h>
25 #include "cpuid.h"
26 #include "lapic.h"
27 #include "mmu.h"
28 #include "trace.h"
29 #include "pmu.h"
30 #include "xen.h"
31 
32 /*
33  * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
34  * aligned to sizeof(unsigned long) because it's not accessed via bitops.
35  */
36 u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
37 EXPORT_SYMBOL_GPL(kvm_cpu_caps);
38 
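/*
 * Compute the size in bytes of the XSAVE area needed to hold the extended
 * state components enumerated in @xstate_bv.  Per-component sizes (EAX) and
 * offsets (EBX) come from the host via CPUID.0xD.<n>; in the compacted
 * format, components are packed back-to-back, with 64-byte alignment applied
 * when ECX[1] is set for the component.
 */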
39 u32 xstate_required_size(u64 xstate_bv, bool compacted)
40 {
41 	int feature_bit = 0;
42 	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
43 
44 	xstate_bv &= XFEATURE_MASK_EXTEND;
45 	while (xstate_bv) {
46 		if (xstate_bv & 0x1) {
47 			u32 eax, ebx, ecx, edx, offset;
48 			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
49 			/* ECX[1]: 64B alignment in compacted form */
50 			if (compacted)
51 				offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
52 			else
53 				offset = ebx;
54 			ret = max(ret, offset + eax);
55 		}
56 
57 		xstate_bv >>= 1;
58 		feature_bit++;
59 	}
60 
61 	return ret;
62 }
63 
64 #define F feature_bit
65 
66 /* Scattered Flag - For features that are scattered by cpufeatures.h. */
67 #define SF(name)						\
68 ({								\
69 	BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES);	\
70 	(boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0);	\
71 })
72 
73 /*
74  * Magic value used by KVM when querying userspace-provided CPUID entries
75  * whose index is not significant, i.e. when the CPUID index of the function
76  * in question can be ignored.  Note, this magic value must have at least one
77  * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find()
78  * to avoid false positives when processing guest CPUID input.
79  */
80 #define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
81 
82 static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
83 	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
84 {
85 	struct kvm_cpuid_entry2 *e;
86 	int i;
87 
88 	/*
89 	 * KVM has a semi-arbitrary rule that querying the guest's CPUID model
90 	 * with IRQs disabled is disallowed.  The CPUID model can legitimately
91 	 * have over one hundred entries, i.e. the lookup is slow, and IRQs are
92 	 * typically disabled in KVM only when KVM is in a performance critical
93 	 * path, e.g. the core VM-Enter/VM-Exit run loop.  Nothing will break
94 	 * if this rule is violated, this assertion is purely to flag potential
95 	 * performance issues.  If this fires, consider moving the lookup out
96 	 * of the hotpath, e.g. by caching information during CPUID updates.
97 	 */
98 	lockdep_assert_irqs_enabled();
99 
100 	for (i = 0; i < nent; i++) {
101 		e = &entries[i];
102 
103 		if (e->function != function)
104 			continue;
105 
106 		/*
107 		 * If the index isn't significant, use the first entry with a
108 		 * matching function.  It's userspace's responsibility to not
109 		 * provide "duplicate" entries in all cases.
110 		 */
111 		if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
112 			return e;
113 
114 
115 		/*
116 		 * Similarly, use the first matching entry if KVM is doing a
117 		 * lookup (as opposed to emulating CPUID) for a function that's
118 		 * architecturally defined as not having a significant index.
119 		 */
120 		if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) {
121 			/*
122 			 * Direct lookups from KVM should not diverge from what
123 			 * KVM defines internally (the architectural behavior).
124 			 */
125 			WARN_ON_ONCE(cpuid_function_is_indexed(function));
126 			return e;
127 		}
128 	}
129 
130 	return NULL;
131 }
132 
133 static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
134 			   struct kvm_cpuid_entry2 *entries,
135 			   int nent)
136 {
137 	struct kvm_cpuid_entry2 *best;
138 	u64 xfeatures;
139 
140 	/*
141 	 * The canonical address checks assume a virtual address width of 48 or
142 	 * 57 bits; reject CPUID data that enumerates any other non-zero width.
143 	 */
144 	best = cpuid_entry2_find(entries, nent, 0x80000008,
145 				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
146 	if (best) {
147 		int vaddr_bits = (best->eax & 0xff00) >> 8;
148 
149 		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
150 			return -EINVAL;
151 	}
152 
153 	/*
154 	 * Exposing dynamic xfeatures to the guest requires additional
155 	 * enabling in the FPU, e.g. to expand the guest XSAVE state size.
156 	 */
157 	best = cpuid_entry2_find(entries, nent, 0xd, 0);
158 	if (!best)
159 		return 0;
160 
161 	xfeatures = best->eax | ((u64)best->edx << 32);
162 	xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
163 	if (!xfeatures)
164 		return 0;
165 
166 	return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
167 }
168 
169 /* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
170 static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
171 				 int nent)
172 {
173 	struct kvm_cpuid_entry2 *orig;
174 	int i;
175 
176 	if (nent != vcpu->arch.cpuid_nent)
177 		return -EINVAL;
178 
179 	for (i = 0; i < nent; i++) {
180 		orig = &vcpu->arch.cpuid_entries[i];
181 		if (e2[i].function != orig->function ||
182 		    e2[i].index != orig->index ||
183 		    e2[i].flags != orig->flags ||
184 		    e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
185 		    e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
186 			return -EINVAL;
187 	}
188 
189 	return 0;
190 }
191 
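/*
 * Walk the possible hypervisor CPUID base leaves (0x40000000, 0x40000100,
 * etc.) looking for an entry whose EBX/ECX/EDX match the 12-byte signature
 * @sig, and return the matching base leaf and its limit (EAX), if any.
 */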
192 static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries,
193 							      int nent, const char *sig)
194 {
195 	struct kvm_hypervisor_cpuid cpuid = {};
196 	struct kvm_cpuid_entry2 *entry;
197 	u32 base;
198 
199 	for_each_possible_hypervisor_cpuid_base(base) {
200 		entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
201 
202 		if (entry) {
203 			u32 signature[3];
204 
205 			signature[0] = entry->ebx;
206 			signature[1] = entry->ecx;
207 			signature[2] = entry->edx;
208 
209 			if (!memcmp(signature, sig, sizeof(signature))) {
210 				cpuid.base = base;
211 				cpuid.limit = entry->eax;
212 				break;
213 			}
214 		}
215 	}
216 
217 	return cpuid;
218 }
219 
220 static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
221 							    const char *sig)
222 {
223 	return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
224 					  vcpu->arch.cpuid_nent, sig);
225 }
226 
227 static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries,
228 							      int nent, u32 kvm_cpuid_base)
229 {
230 	return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
231 				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
232 }
233 
234 static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
235 {
236 	u32 base = vcpu->arch.kvm_cpuid.base;
237 
238 	if (!base)
239 		return NULL;
240 
241 	return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
242 					     vcpu->arch.cpuid_nent, base);
243 }
244 
245 void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
246 {
247 	struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);
248 
249 	/*
250 	 * save the feature bitmap to avoid cpuid lookup for every PV
251 	 * operation
252 	 */
253 	if (best)
254 		vcpu->arch.pv_cpuid.features = best->eax;
255 }
256 
257 /*
258  * Calculate guest's supported XCR0 taking into account guest CPUID data and
259  * KVM's supported XCR0 (the intersection of host XCR0 and KVM_SUPPORTED_XCR0).
260  */
261 static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
262 {
263 	struct kvm_cpuid_entry2 *best;
264 
265 	best = cpuid_entry2_find(entries, nent, 0xd, 0);
266 	if (!best)
267 		return 0;
268 
269 	return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
270 }
271 
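/*
 * Refresh the CPUID bits that mirror mutable guest state, e.g. CR4.OSXSAVE,
 * the APIC enable bit, the XSAVE area sizes for the current XCR0, the KVM PV
 * feature leaf, and MWAIT when the associated quirk is disabled.  Static
 * feature capabilities are not touched here.
 */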
272 static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
273 				       int nent)
274 {
275 	struct kvm_cpuid_entry2 *best;
276 	struct kvm_hypervisor_cpuid kvm_cpuid;
277 
278 	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
279 	if (best) {
280 		/* Update OSXSAVE bit */
281 		if (boot_cpu_has(X86_FEATURE_XSAVE))
282 			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
283 					   kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));
284 
285 		cpuid_entry_change(best, X86_FEATURE_APIC,
286 			   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
287 	}
288 
289 	best = cpuid_entry2_find(entries, nent, 7, 0);
290 	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
291 		cpuid_entry_change(best, X86_FEATURE_OSPKE,
292 				   kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
293 
294 	best = cpuid_entry2_find(entries, nent, 0xD, 0);
295 	if (best)
296 		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
297 
298 	best = cpuid_entry2_find(entries, nent, 0xD, 1);
299 	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
300 		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
301 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
302 
303 	kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
304 	if (kvm_cpuid.base) {
305 		best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
306 		if (kvm_hlt_in_guest(vcpu->kvm) && best)
307 			best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
308 	}
309 
310 	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
311 		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
312 		if (best)
313 			cpuid_entry_change(best, X86_FEATURE_MWAIT,
314 					   vcpu->arch.ia32_misc_enable_msr &
315 					   MSR_IA32_MISC_ENABLE_MWAIT);
316 	}
317 }
318 
319 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
320 {
321 	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
322 }
323 EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
324 
325 static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
326 {
327 #ifdef CONFIG_KVM_HYPERV
328 	struct kvm_cpuid_entry2 *entry;
329 
330 	entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
331 				  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
332 	return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
333 #else
334 	return false;
335 #endif
336 }
337 
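/*
 * Recompute all vCPU state that is derived from the CPUID model: governed
 * features, APIC timer mode and version, the guest's supported XCR0,
 * MAXPHYADDR and the reserved GPA bits, the PMU, CR4 reserved bits and
 * Hyper-V state, followed by the vendor and MMU callbacks.
 */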
338 static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
339 {
340 	struct kvm_lapic *apic = vcpu->arch.apic;
341 	struct kvm_cpuid_entry2 *best;
342 	bool allow_gbpages;
343 
344 	BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES);
345 	bitmap_zero(vcpu->arch.governed_features.enabled,
346 		    KVM_MAX_NR_GOVERNED_FEATURES);
347 
348 	/*
349 	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
350 	 * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
351 	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
352 	 * walk for performance and complexity reasons.  Not to mention KVM
353 	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
354 	 * KVM once a TDP translation is installed.  Mimic hardware behavior so
355 	 * that KVM's behavior is at least consistent, i.e. doesn't randomly
	 * inject #PF.
356 	 * If TDP is disabled, honor *only* guest CPUID as KVM has full control
357 	 * and can install smaller shadow pages if the host lacks 1GiB support.
358 	 */
359 	allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
360 				      guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
361 	if (allow_gbpages)
362 		kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES);
363 
364 	best = kvm_find_cpuid_entry(vcpu, 1);
365 	if (best && apic) {
366 		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
367 			apic->lapic_timer.timer_mode_mask = 3 << 17;
368 		else
369 			apic->lapic_timer.timer_mode_mask = 1 << 17;
370 
371 		kvm_apic_set_version(vcpu);
372 	}
373 
374 	vcpu->arch.guest_supported_xcr0 =
375 		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
376 
377 	kvm_update_pv_runtime(vcpu);
378 
379 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
380 	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
381 
382 	kvm_pmu_refresh(vcpu);
383 	vcpu->arch.cr4_guest_rsvd_bits =
384 	    __cr4_reserved_bits(guest_cpuid_has, vcpu);
385 
386 	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
387 						    vcpu->arch.cpuid_nent));
388 
389 	/* Invoke the vendor callback only after the above state is updated. */
390 	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
391 
392 	/*
393 	 * Except for the MMU, which needs to do its thing after any vendor
394 	 * specific adjustments to the reserved GPA bits.
395 	 */
396 	kvm_mmu_after_set_cpuid(vcpu);
397 }
398 
399 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
400 {
401 	struct kvm_cpuid_entry2 *best;
402 
403 	best = kvm_find_cpuid_entry(vcpu, 0x80000000);
404 	if (!best || best->eax < 0x80000008)
405 		goto not_found;
406 	best = kvm_find_cpuid_entry(vcpu, 0x80000008);
407 	if (best)
408 		return best->eax & 0xff;
409 not_found:
410 	return 36;
411 }
412 
413 /*
414  * This "raw" version returns the reserved GPA bits without any adjustments for
415  * encryption technologies that usurp bits.  The raw mask should be used if and
416  * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
417  */
418 u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
419 {
420 	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
421 }
422 
423 static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
424                         int nent)
425 {
426 	int r;
427 
428 	__kvm_update_cpuid_runtime(vcpu, e2, nent);
429 
430 	/*
431 	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
432 	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
433 	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
434 	 * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
435 	 * the core vCPU model on the fly. It would've been better to forbid any
436 	 * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately
437 	 * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
438 	 * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
439 	 * whether the supplied CPUID data is equal to what's already set.
440 	 */
441 	if (kvm_vcpu_has_run(vcpu)) {
442 		r = kvm_cpuid_check_equal(vcpu, e2, nent);
443 		if (r)
444 			return r;
445 
446 		kvfree(e2);
447 		return 0;
448 	}
449 
450 #ifdef CONFIG_KVM_HYPERV
451 	if (kvm_cpuid_has_hyperv(e2, nent)) {
452 		r = kvm_hv_vcpu_init(vcpu);
453 		if (r)
454 			return r;
455 	}
456 #endif
457 
458 	r = kvm_check_cpuid(vcpu, e2, nent);
459 	if (r)
460 		return r;
461 
462 	kvfree(vcpu->arch.cpuid_entries);
463 	vcpu->arch.cpuid_entries = e2;
464 	vcpu->arch.cpuid_nent = nent;
465 
466 	vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
467 #ifdef CONFIG_KVM_XEN
468 	vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
469 #endif
470 	kvm_vcpu_after_set_cpuid(vcpu);
471 
472 	return 0;
473 }
474 
475 /* Legacy KVM_SET_CPUID: old userspace passes the old kvm_cpuid_entry format. */
476 int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
477 			     struct kvm_cpuid *cpuid,
478 			     struct kvm_cpuid_entry __user *entries)
479 {
480 	int r, i;
481 	struct kvm_cpuid_entry *e = NULL;
482 	struct kvm_cpuid_entry2 *e2 = NULL;
483 
484 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
485 		return -E2BIG;
486 
487 	if (cpuid->nent) {
488 		e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e));
489 		if (IS_ERR(e))
490 			return PTR_ERR(e);
491 
492 		e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
493 		if (!e2) {
494 			r = -ENOMEM;
495 			goto out_free_cpuid;
496 		}
497 	}
498 	for (i = 0; i < cpuid->nent; i++) {
499 		e2[i].function = e[i].function;
500 		e2[i].eax = e[i].eax;
501 		e2[i].ebx = e[i].ebx;
502 		e2[i].ecx = e[i].ecx;
503 		e2[i].edx = e[i].edx;
504 		e2[i].index = 0;
505 		e2[i].flags = 0;
506 		e2[i].padding[0] = 0;
507 		e2[i].padding[1] = 0;
508 		e2[i].padding[2] = 0;
509 	}
510 
511 	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
512 	if (r)
513 		kvfree(e2);
514 
515 out_free_cpuid:
516 	kvfree(e);
517 
518 	return r;
519 }
520 
521 int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
522 			      struct kvm_cpuid2 *cpuid,
523 			      struct kvm_cpuid_entry2 __user *entries)
524 {
525 	struct kvm_cpuid_entry2 *e2 = NULL;
526 	int r;
527 
528 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
529 		return -E2BIG;
530 
531 	if (cpuid->nent) {
532 		e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2));
533 		if (IS_ERR(e2))
534 			return PTR_ERR(e2);
535 	}
536 
537 	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
538 	if (r)
539 		kvfree(e2);
540 
541 	return r;
542 }
543 
544 int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
545 			      struct kvm_cpuid2 *cpuid,
546 			      struct kvm_cpuid_entry2 __user *entries)
547 {
548 	if (cpuid->nent < vcpu->arch.cpuid_nent)
549 		return -E2BIG;
550 
551 	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
552 			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
553 		return -EFAULT;
554 
555 	cpuid->nent = vcpu->arch.cpuid_nent;
556 	return 0;
557 }
558 
559 /* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
560 static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
561 {
562 	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
563 	struct kvm_cpuid_entry2 entry;
564 
565 	reverse_cpuid_check(leaf);
566 
567 	cpuid_count(cpuid.function, cpuid.index,
568 		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
569 
570 	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
571 }
572 
573 static __always_inline
574 void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
575 {
576 	/* Use kvm_cpu_cap_mask() for leaves that aren't KVM-only. */
577 	BUILD_BUG_ON(leaf < NCAPINTS);
578 
579 	kvm_cpu_caps[leaf] = mask;
580 
581 	__kvm_cpu_cap_mask(leaf);
582 }
583 
584 static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
585 {
586 	/* Use kvm_cpu_cap_init_kvm_defined() for KVM-only leaves. */
587 	BUILD_BUG_ON(leaf >= NCAPINTS);
588 
589 	kvm_cpu_caps[leaf] &= mask;
590 
591 	__kvm_cpu_cap_mask(leaf);
592 }
593 
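/*
 * Establish KVM's supported CPU capabilities: seed kvm_cpu_caps from the boot
 * CPU's capabilities, mask each leaf down to what KVM supports and/or
 * emulates, and synthesize features such as X2APIC and TSC_ADJUST that KVM
 * emulates in software irrespective of host support.
 */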
594 void kvm_set_cpu_caps(void)
595 {
596 #ifdef CONFIG_X86_64
597 	unsigned int f_gbpages = F(GBPAGES);
598 	unsigned int f_lm = F(LM);
599 	unsigned int f_xfd = F(XFD);
600 #else
601 	unsigned int f_gbpages = 0;
602 	unsigned int f_lm = 0;
603 	unsigned int f_xfd = 0;
604 #endif
605 	memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));
606 
607 	BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
608 		     sizeof(boot_cpu_data.x86_capability));
609 
610 	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
611 	       sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
612 
613 	kvm_cpu_cap_mask(CPUID_1_ECX,
614 		/*
615 		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
616 		 * advertised to guests via CPUID!
617 		 */
618 		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
619 		0 /* DS-CPL, VMX, SMX, EST */ |
620 		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
621 		F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
622 		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
623 		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
624 		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
625 		F(F16C) | F(RDRAND)
626 	);
627 	/* KVM emulates x2apic in software irrespective of host support. */
628 	kvm_cpu_cap_set(X86_FEATURE_X2APIC);
629 
630 	kvm_cpu_cap_mask(CPUID_1_EDX,
631 		F(FPU) | F(VME) | F(DE) | F(PSE) |
632 		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
633 		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
634 		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
635 		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
636 		0 /* Reserved, DS, ACPI */ | F(MMX) |
637 		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
638 		0 /* HTT, TM, Reserved, PBE */
639 	);
640 
641 	kvm_cpu_cap_mask(CPUID_7_0_EBX,
642 		F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
643 		F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
644 		F(RTM) | F(ZERO_FCS_FDS) | 0 /* MPX */ | F(AVX512F) |
645 		F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
646 		F(CLFLUSHOPT) | F(CLWB) | 0 /* INTEL_PT */ | F(AVX512PF) |
647 		F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
648 		F(AVX512VL));
649 
650 	kvm_cpu_cap_mask(CPUID_7_ECX,
651 		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /* OSPKE */ | F(RDPID) |
652 		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
653 		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
654 		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /* WAITPKG */ |
655 		F(SGX_LC) | F(BUS_LOCK_DETECT)
656 	);
657 	/* Set LA57 based on hardware capability. */
658 	if (cpuid_ecx(7) & F(LA57))
659 		kvm_cpu_cap_set(X86_FEATURE_LA57);
660 
661 	/*
662 	 * PKU is not yet implemented for shadow paging and requires OSPKE
663 	 * to be set on the host.  Clear it if either condition isn't met.
664 	 */
665 	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
666 		kvm_cpu_cap_clear(X86_FEATURE_PKU);
667 
668 	kvm_cpu_cap_mask(CPUID_7_EDX,
669 		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
670 		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
671 		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
672 		F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
673 		F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D)
674 	);
675 
676 	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
677 	kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
678 	kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);
679 
680 	if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
681 		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
682 	if (boot_cpu_has(X86_FEATURE_STIBP))
683 		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
684 	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
685 		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
686 
687 	kvm_cpu_cap_mask(CPUID_7_1_EAX,
688 		F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) |
689 		F(FZRM) | F(FSRS) | F(FSRC) |
690 		F(AMX_FP16) | F(AVX_IFMA) | F(LAM)
691 	);
692 
693 	kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
694 		F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |
695 		F(AMX_COMPLEX)
696 	);
697 
698 	kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
699 		F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
700 		F(BHI_CTRL) | F(MCDT_NO)
701 	);
702 
703 	kvm_cpu_cap_mask(CPUID_D_1_EAX,
704 		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
705 	);
706 
707 	kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
708 		SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
709 	);
710 
711 	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
712 		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
713 		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
714 		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
715 		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
716 		F(TOPOEXT) | 0 /* PERFCTR_CORE */
717 	);
718 
719 	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
720 		F(FPU) | F(VME) | F(DE) | F(PSE) |
721 		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
722 		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
723 		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
724 		F(PAT) | F(PSE36) | 0 /* Reserved */ |
725 		F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
726 		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
727 		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
728 	);
729 
730 	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
731 		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);
732 
733 	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
734 		SF(CONSTANT_TSC)
735 	);
736 
737 	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
738 		F(CLZERO) | F(XSAVEERPTR) |
739 		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
740 		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
741 		F(AMD_PSFD)
742 	);
743 
744 	/*
745 	 * AMD has separate bits for each SPEC_CTRL bit.
746 	 * arch/x86/kernel/cpu/bugs.c is kind enough to
747 	 * record that in cpufeatures, so use those bits.
748 	 */
749 	if (boot_cpu_has(X86_FEATURE_IBPB))
750 		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
751 	if (boot_cpu_has(X86_FEATURE_IBRS))
752 		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
753 	if (boot_cpu_has(X86_FEATURE_STIBP))
754 		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
755 	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
756 		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
757 	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
758 		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
759 	/*
760 	 * The preference is to use SPEC CTRL MSR instead of the
761 	 * VIRT_SPEC MSR.
762 	 */
763 	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
764 	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
765 		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
766 
767 	/*
768 	 * Hide all SVM features by default; SVM will set the cap bits for
769 	 * features it emulates and/or exposes for L1.
770 	 */
771 	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
772 
773 	kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
774 		0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
775 		F(SME_COHERENT));
776 
777 	kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
778 		F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
779 		F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
780 		F(WRMSR_XX_BASE_NS)
781 	);
782 
783 	kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
784 	kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE);
785 	kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO);
786 
787 	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
788 		F(PERFMON_V2)
789 	);
790 
791 	/*
792 	 * Synthesize "LFENCE is serializing" into the AMD-defined entry in
793 	 * KVM's supported CPUID if the feature is reported as supported by the
794 	 * kernel.  LFENCE_RDTSC was a Linux-defined synthetic feature long
795 	 * before AMD joined the bandwagon, e.g. LFENCE is serializing on most
796 	 * CPUs that support SSE2.  On CPUs that don't support AMD's leaf,
797 	 * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
798 	 * the mask with the raw host CPUID, and reporting support in AMD's
799 	 * leaf can make it easier for userspace to detect the feature.
800 	 */
801 	if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
802 		kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
803 	if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
804 		kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
805 	kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);
806 
807 	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
808 		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
809 		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
810 		F(PMM) | F(PMM_EN)
811 	);
812 
813 	/*
814 	 * Hide RDTSCP and RDPID if either feature is reported as supported but
815 	 * probing MSR_TSC_AUX failed.  This is purely a sanity check and
816 	 * should never happen, but the guest will likely crash if RDTSCP or
817 	 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
818 	 * the past.  For example, the sanity check may fire if this instance of
819 	 * KVM is running as L1 on top of an older, broken KVM.
820 	 */
821 	if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
822 		     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
823 		     !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
824 		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
825 		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
826 	}
827 }
828 EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
829 
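/*
 * Output buffer for KVM_GET_{SUPPORTED,EMULATED}_CPUID: @entries holds a
 * fixed capacity of @maxnent entries, and @nent tracks how many have been
 * filled so far.
 */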
830 struct kvm_cpuid_array {
831 	struct kvm_cpuid_entry2 *entries;
832 	int maxnent;
833 	int nent;
834 };
835 
836 static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
837 {
838 	if (array->nent >= array->maxnent)
839 		return NULL;
840 
841 	return &array->entries[array->nent++];
842 }
843 
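/*
 * Grab the next free entry in @array and fill it with the host's CPUID output
 * for @function/@index.  Hypervisor leaves (the 0x40000000 range) are left
 * zeroed for __do_cpuid_func() to synthesize, as are extended leaves beyond
 * the host's maximum, to avoid reading back garbage from out-of-range CPUID.
 */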
844 static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
845 					      u32 function, u32 index)
846 {
847 	struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);
848 
849 	if (!entry)
850 		return NULL;
851 
852 	memset(entry, 0, sizeof(*entry));
853 	entry->function = function;
854 	entry->index = index;
855 	switch (function & 0xC0000000) {
856 	case 0x40000000:
857 		/* Hypervisor leaves are always synthesized by __do_cpuid_func.  */
858 		return entry;
859 
860 	case 0x80000000:
861 		/*
862 		 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which
863 		 * would result in out-of-bounds calls to do_host_cpuid.
864 		 */
865 		{
866 			static int max_cpuid_80000000;
867 			if (!READ_ONCE(max_cpuid_80000000))
868 				WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
869 			if (function > READ_ONCE(max_cpuid_80000000))
870 				return entry;
871 		}
872 		break;
873 
874 	default:
875 		break;
876 	}
877 
878 	cpuid_count(entry->function, entry->index,
879 		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
880 
881 	if (cpuid_function_is_indexed(function))
882 		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
883 
884 	return entry;
885 }
886 
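/*
 * KVM_GET_EMULATED_CPUID: enumerate only features that KVM fully emulates in
 * software, e.g. MOVBE and RDPID, and can therefore expose to the guest even
 * if the host CPU lacks them.
 */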
887 static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
888 {
889 	struct kvm_cpuid_entry2 *entry;
890 
891 	if (array->nent >= array->maxnent)
892 		return -E2BIG;
893 
894 	entry = &array->entries[array->nent];
895 	entry->function = func;
896 	entry->index = 0;
897 	entry->flags = 0;
898 
899 	switch (func) {
900 	case 0:
901 		entry->eax = 7;
902 		++array->nent;
903 		break;
904 	case 1:
905 		entry->ecx = F(MOVBE);
906 		++array->nent;
907 		break;
908 	case 7:
909 		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
910 		entry->eax = 0;
911 		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
912 			entry->ecx = F(RDPID);
913 		++array->nent;
914 		break;
915 	default:
916 		break;
917 	}
918 
919 	return 0;
920 }
921 
922 static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
923 {
924 	struct kvm_cpuid_entry2 *entry;
925 	int r, i, max_idx;
926 
927 	/* all calls to cpuid_count() should be made on the same cpu */
928 	get_cpu();
929 
930 	r = -E2BIG;
931 
932 	entry = do_host_cpuid(array, function, 0);
933 	if (!entry)
934 		goto out;
935 
936 	switch (function) {
937 	case 0:
938 		/* Limited to the highest leaf implemented in KVM. */
939 		entry->eax = min(entry->eax, 0x1fU);
940 		break;
941 	case 1:
942 		cpuid_entry_override(entry, CPUID_1_EDX);
943 		cpuid_entry_override(entry, CPUID_1_ECX);
944 		break;
945 	case 2:
946 		/*
947 		 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
948 		 * CPUID(function=2, index=0) may return different results each
949 		 * time, with the least-significant byte in EAX enumerating the
950 		 * number of times software should do CPUID(2, 0).
951 		 *
952 		 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
953 		 * idiotic.  Intel's SDM states that EAX & 0xff "will always
954 		 * return 01H. Software should ignore this value and not
955 		 * interpret it as an informational descriptor", while AMD's
956 		 * APM states that CPUID(2) is reserved.
957 		 *
958 		 * WARN if a frankenstein CPU that supports virtualization and
959 		 * a stateful CPUID.0x2 is encountered.
960 		 */
961 		WARN_ON_ONCE((entry->eax & 0xff) > 1);
962 		break;
963 	/* Functions 4 and 0x8000001d have an additional index. */
964 	case 4:
965 	case 0x8000001d:
966 		/*
967 		 * Read entries until the cache type in the previous entry is
968 		 * zero, i.e. indicates an invalid entry.
969 		 */
970 		for (i = 1; entry->eax & 0x1f; ++i) {
971 			entry = do_host_cpuid(array, function, i);
972 			if (!entry)
973 				goto out;
974 		}
975 		break;
976 	case 6: /* Thermal management */
977 		entry->eax = 0x4; /* allow ARAT */
978 		entry->ebx = 0;
979 		entry->ecx = 0;
980 		entry->edx = 0;
981 		break;
982 	/* Function 7 has an additional index. */
983 	case 7:
984 		max_idx = entry->eax = min(entry->eax, 2u);
985 		cpuid_entry_override(entry, CPUID_7_0_EBX);
986 		cpuid_entry_override(entry, CPUID_7_ECX);
987 		cpuid_entry_override(entry, CPUID_7_EDX);
988 
989 		/* KVM only supports up to 0x7.2, capped above via min(). */
990 		if (max_idx >= 1) {
991 			entry = do_host_cpuid(array, function, 1);
992 			if (!entry)
993 				goto out;
994 
995 			cpuid_entry_override(entry, CPUID_7_1_EAX);
996 			cpuid_entry_override(entry, CPUID_7_1_EDX);
997 			entry->ebx = 0;
998 			entry->ecx = 0;
999 		}
1000 		if (max_idx >= 2) {
1001 			entry = do_host_cpuid(array, function, 2);
1002 			if (!entry)
1003 				goto out;
1004 
1005 			cpuid_entry_override(entry, CPUID_7_2_EDX);
1006 			entry->ecx = 0;
1007 			entry->ebx = 0;
1008 			entry->eax = 0;
1009 		}
1010 		break;
1011 	case 0xa: { /* Architectural Performance Monitoring */
1012 		union cpuid10_eax eax;
1013 		union cpuid10_edx edx;
1014 
1015 		if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
1016 			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1017 			break;
1018 		}
1019 
1020 		eax.split.version_id = kvm_pmu_cap.version;
1021 		eax.split.num_counters = kvm_pmu_cap.num_counters_gp;
1022 		eax.split.bit_width = kvm_pmu_cap.bit_width_gp;
1023 		eax.split.mask_length = kvm_pmu_cap.events_mask_len;
1024 		edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
1025 		edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;
1026 
1027 		if (kvm_pmu_cap.version)
1028 			edx.split.anythread_deprecated = 1;
1029 		edx.split.reserved1 = 0;
1030 		edx.split.reserved2 = 0;
1031 
1032 		entry->eax = eax.full;
1033 		entry->ebx = kvm_pmu_cap.events_mask;
1034 		entry->ecx = 0;
1035 		entry->edx = edx.full;
1036 		break;
1037 	}
1038 	case 0x1f:
1039 	case 0xb:
1040 		/*
1041 		 * No topology; a valid topology is indicated by the presence
1042 		 * of subleaf 1.
1043 		 */
1044 		entry->eax = entry->ebx = entry->ecx = 0;
1045 		break;
1046 	case 0xd: {
1047 		u64 permitted_xcr0 = kvm_get_filtered_xcr0();
1048 		u64 permitted_xss = kvm_caps.supported_xss;
1049 
1050 		entry->eax &= permitted_xcr0;
1051 		entry->ebx = xstate_required_size(permitted_xcr0, false);
1052 		entry->ecx = entry->ebx;
1053 		entry->edx &= permitted_xcr0 >> 32;
1054 		if (!permitted_xcr0)
1055 			break;
1056 
1057 		entry = do_host_cpuid(array, function, 1);
1058 		if (!entry)
1059 			goto out;
1060 
1061 		cpuid_entry_override(entry, CPUID_D_1_EAX);
1062 		if (entry->eax & (F(XSAVES)|F(XSAVEC)))
1063 			entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
1064 							  true);
1065 		else {
1066 			WARN_ON_ONCE(permitted_xss != 0);
1067 			entry->ebx = 0;
1068 		}
1069 		entry->ecx &= permitted_xss;
1070 		entry->edx &= permitted_xss >> 32;
1071 
1072 		for (i = 2; i < 64; ++i) {
1073 			bool s_state;
1074 			if (permitted_xcr0 & BIT_ULL(i))
1075 				s_state = false;
1076 			else if (permitted_xss & BIT_ULL(i))
1077 				s_state = true;
1078 			else
1079 				continue;
1080 
1081 			entry = do_host_cpuid(array, function, i);
1082 			if (!entry)
1083 				goto out;
1084 
1085 			/*
1086 			 * The supported check above should have filtered out
1087 			 * invalid sub-leaves.  Only valid sub-leaves should
1088 			 * reach this point, and they should have a non-zero
1089 			 * save state size.  Furthermore, check whether the
1090 			 * processor agrees with permitted_xcr0/permitted_xss
1091 			 * on whether this is an XCR0- or IA32_XSS-managed area.
1092 			 */
1093 			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
1094 				--array->nent;
1095 				continue;
1096 			}
1097 
1098 			if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
1099 				entry->ecx &= ~BIT_ULL(2);
1100 			entry->edx = 0;
1101 		}
1102 		break;
1103 	}
1104 	case 0x12:
1105 		/* Intel SGX */
1106 		if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
1107 			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1108 			break;
1109 		}
1110 
1111 		/*
1112 		 * Index 0: Sub-features, MISCSELECT (a.k.a. extended features)
1113 		 * and max enclave sizes.  The SGX sub-features and MISCSELECT
1114 		 * are restricted by kernel and KVM capabilities (like most
1115 		 * feature flags), while enclave size is unrestricted.
1116 		 */
1117 		cpuid_entry_override(entry, CPUID_12_EAX);
1118 		entry->ebx &= SGX_MISC_EXINFO;
1119 
1120 		entry = do_host_cpuid(array, function, 1);
1121 		if (!entry)
1122 			goto out;
1123 
1124 		/*
1125 		 * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
1126 		 * feature flags.  Advertise all supported flags, including
1127 		 * privileged attributes that require explicit opt-in from
1128 		 * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
1129 		 * expected to derive it from supported XCR0.
1130 		 */
1131 		entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
1132 		entry->ebx = 0;
1133 		break;
1134 	/* Intel PT */
1135 	case 0x14:
1136 		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
1137 			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1138 			break;
1139 		}
1140 
1141 		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
1142 			if (!do_host_cpuid(array, function, i))
1143 				goto out;
1144 		}
1145 		break;
1146 	/* Intel AMX TILE */
1147 	case 0x1d:
1148 		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
1149 			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1150 			break;
1151 		}
1152 
1153 		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
1154 			if (!do_host_cpuid(array, function, i))
1155 				goto out;
1156 		}
1157 		break;
1158 	case 0x1e: /* TMUL information */
1159 		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
1160 			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1161 			break;
1162 		}
1163 		break;
1164 	case KVM_CPUID_SIGNATURE: {
1165 		const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
1166 		entry->eax = KVM_CPUID_FEATURES;
1167 		entry->ebx = sigptr[0];
1168 		entry->ecx = sigptr[1];
1169 		entry->edx = sigptr[2];
1170 		break;
1171 	}
1172 	case KVM_CPUID_FEATURES:
1173 		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
1174 			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
1175 			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
1176 			     (1 << KVM_FEATURE_ASYNC_PF) |
1177 			     (1 << KVM_FEATURE_PV_EOI) |
1178 			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
1179 			     (1 << KVM_FEATURE_PV_UNHALT) |
1180 			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
1181 			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
1182 			     (1 << KVM_FEATURE_PV_SEND_IPI) |
1183 			     (1 << KVM_FEATURE_POLL_CONTROL) |
1184 			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
1185 			     (1 << KVM_FEATURE_ASYNC_PF_INT);
1186 
1187 		if (sched_info_on())
1188 			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
1189 
1190 		entry->ebx = 0;
1191 		entry->ecx = 0;
1192 		entry->edx = 0;
1193 		break;
1194 	case 0x80000000:
1195 		entry->eax = min(entry->eax, 0x80000022);
1196 		/*
1197 		 * Serializing LFENCE is reported in a multitude of ways, and
1198 		 * NullSegClearsBase is not reported in CPUID on Zen2; help
1199 		 * userspace by providing the CPUID leaf ourselves.
1200 		 *
1201 		 * However, only do it if the host has CPUID leaf 0x8000001d.
1202 		 * QEMU thinks that it can query the host blindly for that
1203 		 * CPUID leaf if KVM reports that it supports 0x8000001d or
1204 		 * above.  The processor merrily returns values from the
1205 		 * highest Intel leaf which QEMU tries to use as the guest's
1206 		 * 0x8000001d.  Even worse, this can result in an infinite
1207 		 * loop if said highest leaf has no subleaves indexed by ECX.
1208 		 */
1209 		if (entry->eax >= 0x8000001d &&
1210 		    (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
1211 		     || !static_cpu_has_bug(X86_BUG_NULL_SEG)))
1212 			entry->eax = max(entry->eax, 0x80000021);
1213 		break;
1214 	case 0x80000001:
1215 		entry->ebx &= ~GENMASK(27, 16);
1216 		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
1217 		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
1218 		break;
1219 	case 0x80000005:
1220 		/*  Pass host L1 cache and TLB info. */
1221 		break;
1222 	case 0x80000006:
1223 		/* Drop reserved bits, pass host L2 cache and TLB info. */
1224 		entry->edx &= ~GENMASK(17, 16);
1225 		break;
1226 	case 0x80000007: /* Advanced power management */
1227 		cpuid_entry_override(entry, CPUID_8000_0007_EDX);
1228 
1229 		/* mask against host */
1230 		entry->edx &= boot_cpu_data.x86_power;
1231 		entry->eax = entry->ebx = entry->ecx = 0;
1232 		break;
1233 	case 0x80000008: {
1234 		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
1235 		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
1236 		unsigned phys_as = entry->eax & 0xff;
1237 
1238 		/*
1239 		 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
1240 		 * the guest operates in the same PA space as the host, i.e.
1241 		 * reductions in MAXPHYADDR for memory encryption affect shadow
1242 		 * paging, too.
1243 		 *
1244 		 * If TDP is enabled but an explicit guest MAXPHYADDR is not
1245 		 * provided, use the raw bare metal MAXPHYADDR as reductions to
1246 		 * the HPAs do not affect GPAs.
1247 		 */
1248 		if (!tdp_enabled)
1249 			g_phys_as = boot_cpu_data.x86_phys_bits;
1250 		else if (!g_phys_as)
1251 			g_phys_as = phys_as;
1252 
1253 		entry->eax = g_phys_as | (virt_as << 8);
1254 		entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
1255 		entry->edx = 0;
1256 		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
1257 		break;
1258 	}
1259 	case 0x8000000A:
1260 		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
1261 			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1262 			break;
1263 		}
1264 		entry->eax = 1; /* SVM revision 1 */
1265 		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
1266 				   ASID emulation to nested SVM */
1267 		entry->ecx = 0; /* Reserved */
1268 		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
1269 		break;
1270 	case 0x80000019:
1271 		entry->ecx = entry->edx = 0;
1272 		break;
1273 	case 0x8000001a:
1274 		entry->eax &= GENMASK(2, 0);
1275 		entry->ebx = entry->ecx = entry->edx = 0;
1276 		break;
1277 	case 0x8000001e:
1278 		/* Do not return host topology information.  */
1279 		entry->eax = entry->ebx = entry->ecx = 0;
1280 		entry->edx = 0; /* reserved */
1281 		break;
1282 	case 0x8000001F:
1283 		if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
1284 			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1285 		} else {
1286 			cpuid_entry_override(entry, CPUID_8000_001F_EAX);
1287 			/* Clear NumVMPL since KVM does not support VMPL.  */
1288 			entry->ebx &= ~GENMASK(31, 12);
1289 			/*
1290 			 * Enumerate '0' for "PA bits reduction", the adjusted
1291 			 * MAXPHYADDR is enumerated directly (see 0x80000008).
1292 			 */
1293 			entry->ebx &= ~GENMASK(11, 6);
1294 		}
1295 		break;
1296 	case 0x80000020:
1297 		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1298 		break;
1299 	case 0x80000021:
1300 		entry->ebx = entry->ecx = entry->edx = 0;
1301 		cpuid_entry_override(entry, CPUID_8000_0021_EAX);
1302 		break;
1303 	/* AMD Extended Performance Monitoring and Debug */
1304 	case 0x80000022: {
1305 		union cpuid_0x80000022_ebx ebx;
1306 
1307 		entry->ecx = entry->edx = 0;
1308 		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
1309 			entry->eax = entry->ebx = 0;
1310 			break;
1311 		}
1312 
1313 		cpuid_entry_override(entry, CPUID_8000_0022_EAX);
1314 
1315 		if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
1316 			ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
1317 		else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
1318 			ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE;
1319 		else
1320 			ebx.split.num_core_pmc = AMD64_NUM_COUNTERS;
1321 
1322 		entry->ebx = ebx.full;
1323 		break;
1324 	}
1325 	/* Add support for Centaur's CPUID instruction. */
1326 	case 0xC0000000:
1327 		/* Just support up to 0xC0000004 for now. */
1328 		entry->eax = min(entry->eax, 0xC0000004);
1329 		break;
1330 	case 0xC0000001:
1331 		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
1332 		break;
1333 	case 3: /* Processor serial number */
1334 	case 5: /* MONITOR/MWAIT */
1335 	case 0xC0000002:
1336 	case 0xC0000003:
1337 	case 0xC0000004:
1338 	default:
1339 		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1340 		break;
1341 	}
1342 
1343 	r = 0;
1344 
1345 out:
1346 	put_cpu();
1347 
1348 	return r;
1349 }
1350 
1351 static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
1352 			 unsigned int type)
1353 {
1354 	if (type == KVM_GET_EMULATED_CPUID)
1355 		return __do_cpuid_func_emulated(array, func);
1356 
1357 	return __do_cpuid_func(array, func);
1358 }
1359 
1360 #define CENTAUR_CPUID_SIGNATURE 0xC0000000
1361 
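/*
 * Enumerate an entire CPUID class: emit the base leaf, then every function up
 * to the limit the base leaf reports in EAX.  The Centaur class is skipped
 * entirely on non-Centaur hardware.
 */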
1362 static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
1363 			  unsigned int type)
1364 {
1365 	u32 limit;
1366 	int r;
1367 
1368 	if (func == CENTAUR_CPUID_SIGNATURE &&
1369 	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
1370 		return 0;
1371 
1372 	r = do_cpuid_func(array, func, type);
1373 	if (r)
1374 		return r;
1375 
1376 	limit = array->entries[array->nent - 1].eax;
1377 	for (func = func + 1; func <= limit; ++func) {
1378 		r = do_cpuid_func(array, func, type);
1379 		if (r)
1380 			break;
1381 	}
1382 
1383 	return r;
1384 }
1385 
1386 static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
1387 				 __u32 num_entries, unsigned int ioctl_type)
1388 {
1389 	int i;
1390 	__u32 pad[3];
1391 
1392 	if (ioctl_type != KVM_GET_EMULATED_CPUID)
1393 		return false;
1394 
1395 	/*
1396 	 * We want to make sure that ->padding is being passed clean from
1397 	 * userspace in case we want to use it for something in the future.
1398 	 *
1399 	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
1400 	 * have to content ourselves with enforcing it only on the emulated
1401 	 * side. /me sheds a tear.
1402 	 */
1403 	for (i = 0; i < num_entries; i++) {
1404 		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
1405 			return true;
1406 
1407 		if (pad[0] || pad[1] || pad[2])
1408 			return true;
1409 	}
1410 	return false;
1411 }
1412 
1413 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
1414 			    struct kvm_cpuid_entry2 __user *entries,
1415 			    unsigned int type)
1416 {
1417 	static const u32 funcs[] = {
1418 		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
1419 	};
1420 
1421 	struct kvm_cpuid_array array = {
1422 		.nent = 0,
1423 	};
1424 	int r, i;
1425 
1426 	if (cpuid->nent < 1)
1427 		return -E2BIG;
1428 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1429 		cpuid->nent = KVM_MAX_CPUID_ENTRIES;
1430 
1431 	if (sanity_check_entries(entries, cpuid->nent, type))
1432 		return -EINVAL;
1433 
1434 	array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
1435 	if (!array.entries)
1436 		return -ENOMEM;
1437 
1438 	array.maxnent = cpuid->nent;
1439 
1440 	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
1441 		r = get_cpuid_func(&array, funcs[i], type);
1442 		if (r)
1443 			goto out_free;
1444 	}
1445 	cpuid->nent = array.nent;
1446 
1447 	if (copy_to_user(entries, array.entries,
1448 			 array.nent * sizeof(struct kvm_cpuid_entry2)))
1449 		r = -EFAULT;
1450 
1451 out_free:
1452 	kvfree(array.entries);
1453 	return r;
1454 }
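
/*
 * Typical userspace consumption of the two ioctls above, as a minimal sketch
 * (illustrative only; the fds, error handling and the choice of 64 entries
 * are assumptions, not dictated by KVM).  Note, KVM_GET_SUPPORTED_CPUID
 * shrinks ->nent to the number of entries actually filled:
 *
 *	struct kvm_cpuid2 *cpuid;
 *
 *	cpuid = calloc(1, sizeof(*cpuid) + 64 * sizeof(struct kvm_cpuid_entry2));
 *	cpuid->nent = 64;
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);	// system-wide /dev/kvm fd
 *	// ... trim/adjust entries for the desired vCPU model ...
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);		// per-vCPU fd
 */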
1455 
1456 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
1457 						    u32 function, u32 index)
1458 {
1459 	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
1460 				 function, index);
1461 }
1462 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);
1463 
1464 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
1465 					      u32 function)
1466 {
1467 	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
1468 				 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
1469 }
1470 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
1471 
1472 /*
1473  * Intel CPUID semantics treats any query for an out-of-range leaf as if the
1474  * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
1475  * returns all zeroes for any undefined leaf, whether or not the leaf is in
1476  * range.  Centaur/VIA follows Intel semantics.
1477  *
1478  * A leaf is considered out-of-range if its function is higher than the maximum
1479  * supported leaf of its associated class or if its associated class does not
1480  * exist.
1481  *
1482  * There are four primary classes to be considered, with their respective
1483  * ranges described as "<base> - <top>[,<base2> - <top2>]", inclusive.  A primary
1484  * class exists if a guest CPUID entry for its <base> leaf exists.  For a given
1485  * class, CPUID.<base>.EAX contains the max supported leaf for the class.
1486  *
1487  *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
1488  *  - Hypervisor: 0x40000000 - 0x4fffffff
1489  *  - Extended:   0x80000000 - 0xbfffffff
1490  *  - Centaur:    0xc0000000 - 0xcfffffff
1491  *
1492  * The Hypervisor class is further subdivided into sub-classes that each act as
1493  * their own independent class associated with a 0x100 byte range.  E.g. if QEMU
1494  * is advertising support for both HyperV and KVM, the resulting Hypervisor
1495  * CPUID sub-classes are:
1496  *
1497  *  - HyperV:     0x40000000 - 0x400000ff
1498  *  - KVM:        0x40000100 - 0x400001ff
1499  */
1500 static struct kvm_cpuid_entry2 *
1501 get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
1502 {
1503 	struct kvm_cpuid_entry2 *basic, *class;
1504 	u32 function = *fn_ptr;
1505 
1506 	basic = kvm_find_cpuid_entry(vcpu, 0);
1507 	if (!basic)
1508 		return NULL;
1509 
1510 	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
1511 	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
1512 		return NULL;
1513 
1514 	if (function >= 0x40000000 && function <= 0x4fffffff)
1515 		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
1516 	else if (function >= 0xc0000000)
1517 		class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
1518 	else
1519 		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);
1520 
1521 	if (class && function <= class->eax)
1522 		return NULL;
1523 
1524 	/*
1525 	 * Leaf specific adjustments are also applied when redirecting to the
1526 	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
1527 	 * entry for CPUID.0xb.index (see below), then the output value for EDX
1528 	 * needs to be pulled from CPUID.0xb.1.
1529 	 */
1530 	*fn_ptr = basic->eax;
1531 
1532 	/*
1533 	 * The class does not exist or the requested function is out of range;
1534 	 * the effective CPUID entry is the max basic leaf.  Note, the index of
1535 	 * the original requested leaf is observed!
1536 	 */
1537 	return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
1538 }
1539 
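/*
 * Emulate what hardware CPUID would return to the guest: look up the entry
 * for EAX/ECX, redirecting out-of-range functions to the max basic leaf per
 * Intel semantics unless @exact_only, and apply runtime quirks such as
 * TSX_CTRL's RTM/HLE suppression.  Returns %true if an exact entry was found.
 */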
1540 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
1541 	       u32 *ecx, u32 *edx, bool exact_only)
1542 {
1543 	u32 orig_function = *eax, function = *eax, index = *ecx;
1544 	struct kvm_cpuid_entry2 *entry;
1545 	bool exact, used_max_basic = false;
1546 
1547 	entry = kvm_find_cpuid_entry_index(vcpu, function, index);
1548 	exact = !!entry;
1549 
1550 	if (!entry && !exact_only) {
1551 		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
1552 		used_max_basic = !!entry;
1553 	}
1554 
1555 	if (entry) {
1556 		*eax = entry->eax;
1557 		*ebx = entry->ebx;
1558 		*ecx = entry->ecx;
1559 		*edx = entry->edx;
1560 		if (function == 7 && index == 0) {
1561 			u64 data;
1562 			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
1563 			    (data & TSX_CTRL_CPUID_CLEAR))
1564 				*ebx &= ~(F(RTM) | F(HLE));
1565 		} else if (function == 0x80000007) {
1566 			if (kvm_hv_invtsc_suppressed(vcpu))
1567 				*edx &= ~SF(CONSTANT_TSC);
1568 		}
1569 	} else {
1570 		*eax = *ebx = *ecx = *edx = 0;
1571 		/*
1572 		 * When leaf 0BH or 1FH is defined, CL is pass-through
1573 		 * and EDX is always the x2APIC ID, even for undefined
1574 		 * subleaves. Subleaf 1 will exist iff the leaf is
1575 		 * implemented, so we pass through CL iff subleaf 1
1576 		 * exists. EDX can be copied from any existing subleaf.
1577 		 */
1578 		if (function == 0xb || function == 0x1f) {
1579 			entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
1580 			if (entry) {
1581 				*ecx = index & 0xff;
1582 				*edx = entry->edx;
1583 			}
1584 		}
1585 	}
1586 	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
1587 			used_max_basic);
1588 	return exact;
1589 }
1590 EXPORT_SYMBOL_GPL(kvm_cpuid);
1591 
1592 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
1593 {
1594 	u32 eax, ebx, ecx, edx;
1595 
1596 	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
1597 		return 1;
1598 
1599 	eax = kvm_rax_read(vcpu);
1600 	ecx = kvm_rcx_read(vcpu);
1601 	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
1602 	kvm_rax_write(vcpu, eax);
1603 	kvm_rbx_write(vcpu, ebx);
1604 	kvm_rcx_write(vcpu, ecx);
1605 	kvm_rdx_write(vcpu, edx);
1606 	return kvm_skip_emulated_instruction(vcpu);
1607 }
1608 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
1609