xref: /linux/arch/x86/kvm/cpuid.c (revision 55f3538c4923e9dfca132e99ebec370e8094afda)
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;

			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}
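
/*
 * Illustrative sketch, not part of this file: the same size computation
 * done from userspace with GCC's <cpuid.h> helpers.  Assumes a host whose
 * CPUID leaf 0xD follows the SDM layout (sub-leaf EAX = component size,
 * EBX = offset in the standard format); 576 is XSAVE_HDR_SIZE +
 * XSAVE_HDR_OFFSET spelled out.
 */
#if 0
#include <cpuid.h>
#include <stdint.h>

static uint32_t xstate_size_demo(uint64_t xstate_bv, int compacted)
{
	uint32_t ret = 576;	/* legacy region (512) + XSAVE header (64) */
	int bit = 2;		/* bits 0-1 (x87/SSE) live in the legacy region */

	for (xstate_bv >>= 2; xstate_bv; xstate_bv >>= 1, bit++) {
		uint32_t eax, ebx, ecx, edx, offset;

		if (!(xstate_bv & 1))
			continue;
		__cpuid_count(0xD, bit, eax, ebx, ecx, edx);
		/* the compacted format packs components back to back */
		offset = compacted ? ret : ebx;
		if (offset + eax > ret)
			ret = offset + eax;
	}
	return ret;
}
#endif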

bool kvm_mpx_supported(void)
{
	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		 && kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);

u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_mpx_supported())
		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

/* For scattered features from cpufeatures.h; we currently expose none */
#define KF(x) bit(KVM_CPUID_BIT_##x)
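
/*
 * For example, F(FPU) expands to bit(X86_FEATURE_FPU); X86_FEATURE_FPU
 * is bit 0 of capability word 0, so F(FPU) == (1 << 0) within the
 * CPUID.1:EDX feature word.
 */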

int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	best->edx &= ~F(APIC);
	if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
		best->edx |= F(APIC);

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best) {
		/* Update OSPKE bit */
		if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
			best->ecx &= ~F(OSPKE);
			if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
				best->ecx |= F(OSPKE);
		}
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	/*
	 * The existing code assumes the virtual address is 48-bit or 57-bit
	 * in the canonical address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	kvm_mmu_reset_context(vcpu);

	kvm_pmu_refresh(vcpu);
	return 0;
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
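
/*
 * Illustrative sketch, not part of this file: the same query issued
 * directly to hardware from userspace.  Leaf 0x80000008 EAX[7:0] is
 * MAXPHYADDR and EAX[15:8] the virtual-address width; 36 bits is the
 * architectural default when the leaf is absent.
 */
#if 0
#include <cpuid.h>

static int maxphyaddr_demo(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid(0x80000000, eax, ebx, ecx, edx);
	if (eax < 0x80000008)
		return 36;		/* leaf missing: assume 36 bits */
	__cpuid(0x80000008, eax, ebx, ecx, edx);
	return eax & 0xff;		/* physical-address width */
}
#endif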

/* legacy KVM_SET_CPUID: old userspace fills the newer kernel with old-format entries */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries = NULL;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	if (cpuid->nent) {
		cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
					cpuid->nent);
		if (!cpuid_entries)
			goto out;
		r = -EFAULT;
		if (copy_from_user(cpuid_entries, entries,
				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
			goto out;
	}
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out:
	vfree(cpuid_entries);
	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}
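
/*
 * Illustrative sketch, not part of this file: the usual userspace
 * sequence that ends up in the handler above -- fetch what KVM can
 * virtualize from /dev/kvm, then install it on a vCPU.  Error handling
 * is elided; 80 matches KVM_MAX_CPUID_ENTRIES in this tree.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void set_vcpu_cpuid_demo(int kvm_fd, int vcpu_fd)
{
	struct {
		struct kvm_cpuid2 hdr;
		struct kvm_cpuid_entry2 entries[80];
	} cpuid;

	memset(&cpuid, 0, sizeof(cpuid));
	cpuid.hdr.nent = 80;

	/* kvm_dev_ioctl_get_cpuid() fills at most hdr.nent entries */
	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, &cpuid.hdr);
	/* lands in kvm_vcpu_ioctl_set_cpuid2() above */
	ioctl(vcpu_fd, KVM_SET_CPUID2, &cpuid.hdr);
}
#endif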

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
				   u32 func, u32 index, int *nent, int maxnent)
{
	switch (func) {
	case 0:
		entry->eax = 1;		/* only one leaf currently */
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	default:
		break;
	}

	entry->function = func;
	entry->index = index;

	return 0;
}

static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
				 u32 index, int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;

	/* cpuid 1.edx */
	const u32 kvm_cpuid_1_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_cpuid_8000_0001_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_cpuid_1_ecx_x86_features =
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_cpuid_8000_0001_ecx_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

	/* cpuid 0x80000008.ebx */
	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
		F(IBPB) | F(IBRS);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_cpuid_C000_0001_edx_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 7.0.ebx */
	const u32 kvm_cpuid_7_0_ebx_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL);

	/* cpuid 0xD.1.eax */
	const u32 kvm_cpuid_D_1_eax_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

	/* cpuid 7.0.ecx */
	const u32 kvm_cpuid_7_0_ecx_x86_features =
		F(AVX512VBMI) | F(LA57) | F(PKU) |
		0 /* OSPKE */ | F(AVX512_VPOPCNTDQ);

	/* cpuid 7.0.edx */
	const u32 kvm_cpuid_7_0_edx_x86_features =
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(ARCH_CAPABILITIES);

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xd);
		break;
	case 1:
		entry->edx &= kvm_cpuid_1_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_1_EDX);
		entry->ecx &= kvm_cpuid_1_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_1_ECX);
		/*
		 * We support x2apic emulation even if the host does not
		 * support it, since we emulate x2apic in software.
		 */
		entry->ecx |= F(X2APIC);
		break;
	/*
	 * Function 2 entries are STATEFUL.  That is, repeated cpuid commands
	 * may return different values.  This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying
	 * behavior in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT.
	 */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 has additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 7: {
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* Mask ebx against host capability word 9 */
		if (index == 0) {
			entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
			cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
			/* TSC_ADJUST is emulated */
			entry->ebx |= F(TSC_ADJUST);
			entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
			cpuid_mask(&entry->ecx, CPUID_7_ECX);
			/* PKU is not yet implemented for shadow paging. */
			if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
				entry->ecx &= ~F(PKU);
			entry->edx &= kvm_cpuid_7_0_edx_x86_features;
			cpuid_mask(&entry->edx, CPUID_7_EDX);
		} else {
			entry->ebx = 0;
			entry->ecx = 0;
			entry->edx = 0;
		}
		entry->eax = 0;
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/* function 0xb has additional index. */
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);

			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
				cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES) | F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
			++i;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;

		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
		entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		/* IBRS and IBPB aren't necessarily present in hardware cpuid */
		if (boot_cpu_has(X86_FEATURE_IBPB))
			entry->ebx |= F(IBPB);
		if (boot_cpu_has(X86_FEATURE_IBRS))
			entry->ebx |= F(IBRS);
		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		break;
	case 0x8000001d:
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 for now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}
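
/*
 * Illustrative sketch, not part of this file: the sub-leaf walks above
 * (leaves 4, 0xb and 0xd) all follow the same hardware convention, shown
 * here for the deterministic-cache leaf 4 -- keep incrementing ECX until
 * the cache-type field, EAX[4:0], reads zero.
 */
#if 0
#include <cpuid.h>
#include <stdio.h>

static void walk_leaf4_demo(void)
{
	unsigned int i;

	for (i = 0; ; i++) {
		unsigned int eax, ebx, ecx, edx;

		__cpuid_count(4, i, eax, ebx, ecx, edx);
		if (!(eax & 0x1f))	/* cache type 0: no more caches */
			break;
		printf("subleaf %u: L%u, type %u\n",
		       i, (eax >> 5) & 0x7, eax & 0x1f);
	}
}
#endif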

static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
			u32 idx, int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

	return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}

#undef F

struct kvm_cpuid_param {
	u32 func;
	u32 idx;
	bool has_leaf_count;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we
	 * have to content ourselves with checking only the emulated side.
	 * /me sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
		{ .func = KVM_CPUID_SIGNATURE },
		{ .func = KVM_CPUID_FEATURES },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
				&nent, cpuid->nent, type);

		if (r)
			goto out_free;

		if (!ent->has_leaf_count)
			continue;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
				     &nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
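
/*
 * Illustrative sketch, not part of this file: callers of
 * KVM_GET_SUPPORTED_CPUID typically grow the buffer until the -E2BIG
 * produced above goes away (QEMU does essentially this).
 */
#if 0
#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_cpuid2 *get_supported_cpuid_demo(int kvm_fd)
{
	int nent = 8;

	for (;;) {
		struct kvm_cpuid2 *cpuid;

		cpuid = calloc(1, sizeof(*cpuid) +
				  nent * sizeof(struct kvm_cpuid_entry2));
		if (!cpuid)
			return NULL;
		cpuid->nent = nent;
		if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
			return cpuid;	/* ->nent now holds the real count */
		free(cpuid);
		if (errno != E2BIG)
			return NULL;
		nent *= 2;		/* too small: retry with a bigger buffer */
	}
}
#endif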

static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	struct kvm_cpuid_entry2 *ej;
	int j = i;
	int nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	do {
		j = (j + 1) % nent;
		ej = &vcpu->arch.cpuid_entries[j];
	} while (ej->function != e->function);

	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;

	return j;
}
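
/*
 * Example: with three leaf-2 entries at indexes 5, 6 and 7, successive
 * guest CPUID.2 reads return entry 5, then 6, then 7, then wrap back to
 * 5 -- the READ_NEXT flag walks round-robin through all entries that
 * share the function number.
 */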

/*
 * Find an entry with matching function, matching index (if needed), and
 * that should be read next (if it's stateful).
 */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
	u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}
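
/*
 * Example: if the guest's CPUID.0.EAX (maximum standard leaf) is 0xd, a
 * query for an unknown leaf such as 0x1234 finds no entry above and
 * falls back to the contents of leaf 0xd, mirroring how real processors
 * answer out-of-range leaves.  Out-of-range extended leaves (0x8000xxxx)
 * fall back through the maximum *standard* leaf as well.
 */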

bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;
	bool entry_found = true;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best) {
		entry_found = false;
		if (!check_limit)
			goto out;

		best = check_cpuid_limit(vcpu, function, index);
	}

out:
	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else {
		*eax = *ebx = *ecx = *edx = 0;
	}
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
	return entry_found;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
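
/*
 * Illustrative sketch, not part of this file: what a guest executes to
 * reach the emulation above.  A guest probing for KVM issues CPUID at
 * KVM_CPUID_SIGNATURE (0x40000000) and compares EBX/ECX/EDX against the
 * "KVMKVMKVM" signature built in __do_cpuid_ent().
 */
#if 0
#include <cpuid.h>
#include <string.h>

static int running_on_kvm_demo(void)
{
	unsigned int eax, sig[3];

	__cpuid(0x40000000, eax, sig[0], sig[1], sig[2]);
	return memcmp(sig, "KVMKVMKVM\0\0", 12) == 0;
}
#endif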
918