// SPDX-License-Identifier: GPL-2.0-only
#include <asm/msr-index.h>

#include <stdint.h>

#include "kvm_util.h"
#include "processor.h"

/*
 * Use HYPERVISOR as the feature for MSRs that are emulated unconditionally,
 * as the HYPERVISOR flag itself is reported unconditionally.
 */
#define X86_FEATURE_NONE X86_FEATURE_HYPERVISOR

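/*
 * A single MSR testcase: the target MSR (index and name), the feature(s) that
 * enumerate the MSR, a value to write, a reserved value that is expected to
 * #GP, and the MSR's expected reset value.
 */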
struct kvm_msr {
	const struct kvm_x86_cpu_feature feature;
	const struct kvm_x86_cpu_feature feature2;
	const char *name;
	const u64 reset_val;
	const u64 write_val;
	const u64 rsvd_val;
	const u32 index;
};

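/*
 * ____MSR_TEST() takes the full set of parameters; __MSR_TEST() uses the same
 * feature for both feature slots, and the MSR_TEST*() wrappers stringify the
 * MSR to generate the testcase name (MSR_TEST() and MSR_TEST2() also default
 * the reset value to '0').
 */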
#define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2)		\
{									\
	.index = msr,							\
	.name = str,							\
	.write_val = val,						\
	.rsvd_val = rsvd,						\
	.reset_val = reset,						\
	.feature = X86_FEATURE_ ##feat,					\
	.feature2 = X86_FEATURE_ ##f2,					\
}

#define __MSR_TEST(msr, str, val, rsvd, reset, feat)			\
	____MSR_TEST(msr, str, val, rsvd, reset, feat, feat)

#define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat)			\
	__MSR_TEST(msr, #msr, val, rsvd, reset, feat)

#define MSR_TEST(msr, val, rsvd, feat)					\
	__MSR_TEST(msr, #msr, val, rsvd, 0, feat)

#define MSR_TEST2(msr, val, rsvd, feat, f2)				\
	____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2)

/*
 * Note, use a page aligned value for the canonical value so that the value
 * is compatible with MSRs that use bits 11:0 for things other than addresses.
 */
static const u64 canonical_val = 0x123456789000ull;

/*
 * Arbitrary value with bits set in every byte, but not all bits set.  This is
 * also a non-canonical value, but that's coincidental (any 64-bit value with
 * an alternating 0s/1s pattern will be non-canonical).
 */
static const u64 u64_val = 0xaaaa5555aaaa5555ull;

#define MSR_TEST_CANONICAL(msr, feat)					\
	__MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)

/*
 * The main struct must be scoped to a function due to the use of structures to
 * define features.  For the global structure, allocate enough space for the
 * foreseeable future without getting too ridiculous, to minimize maintenance
 * costs (bumping the array size every time an MSR is added is really annoying).
 */
static struct kvm_msr msrs[128];
static int idx;

static bool ignore_unsupported_msrs;

static u64 fixup_rdmsr_val(u32 msr, u64 want)
{
	/*
	 * AMD CPUs drop bits 63:32 on some MSRs that Intel CPUs support.  KVM
	 * is supposed to emulate that behavior based on guest vendor model
	 * (which is the same as the host vendor model for this test).
	 */
	if (!host_cpu_is_amd)
		return want;

	switch (msr) {
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
	case MSR_TSC_AUX:
		return want & GENMASK_ULL(31, 0);
	default:
		return want;
	}
}

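/* Read @msr and assert that the read succeeds and returns exactly @want. */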
static void __rdmsr(u32 msr, u64 want)
{
	u64 val;
	u8 vec;

	vec = rdmsr_safe(msr, &val);
	__GUEST_ASSERT(!vec, "Unexpected %s on RDMSR(0x%x)", ex_str(vec), msr);

	__GUEST_ASSERT(val == want, "Wanted 0x%lx from RDMSR(0x%x), got 0x%lx",
		       want, msr, val);
}

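/*
 * Write @val to @msr, assert the write succeeds, and verify the value reads
 * back (adjusted for CPUs that truncate the MSR, see fixup_rdmsr_val()).
 */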
static void __wrmsr(u32 msr, u64 val)
{
	u8 vec;

	vec = wrmsr_safe(msr, val);
	__GUEST_ASSERT(!vec, "Unexpected %s on WRMSR(0x%x, 0x%lx)",
		       ex_str(vec), msr, val);
	__rdmsr(msr, fixup_rdmsr_val(msr, val));
}

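/*
 * Test an MSR the vCPU is expected to support: verify the reset value, write
 * and read back the test value, then sync with the host (which verifies the
 * value from userspace and restores the reset value), and finally verify the
 * reset value was restored.
 */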
static void guest_test_supported_msr(const struct kvm_msr *msr)
{
	__rdmsr(msr->index, msr->reset_val);
	__wrmsr(msr->index, msr->write_val);
	GUEST_SYNC(fixup_rdmsr_val(msr->index, msr->write_val));

	__rdmsr(msr->index, msr->reset_val);
}

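/*
 * Test an MSR the vCPU is expected to not support: both RDMSR and WRMSR
 * should #GP, modulo the ignore_msrs and CET carve-outs below.
 */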
static void guest_test_unsupported_msr(const struct kvm_msr *msr)
{
	u64 val;
	u8 vec;

	/*
	 * KVM's ABI with respect to ignore_msrs is a mess and largely beyond
	 * repair; just skip the unsupported MSR tests.
	 */
	if (ignore_unsupported_msrs)
		goto skip_wrmsr_gp;

	/*
	 * {S,U}_CET exist if IBT or SHSTK is supported, but with bits that are
	 * writable only if their associated feature is supported.  Skip the
	 * RDMSR #GP test if the secondary feature is supported, but perform
	 * the WRMSR #GP test as the to-be-written value is tied to the primary
	 * feature.  For all other MSRs, simply do nothing.
	 */
	if (this_cpu_has(msr->feature2)) {
		if (msr->index != MSR_IA32_U_CET &&
		    msr->index != MSR_IA32_S_CET)
			goto skip_wrmsr_gp;

		goto skip_rdmsr_gp;
	}

	vec = rdmsr_safe(msr->index, &val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on RDMSR(0x%x), got %s",
		       msr->index, ex_str(vec));

skip_rdmsr_gp:
	vec = wrmsr_safe(msr->index, msr->write_val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
		       msr->index, msr->write_val, ex_str(vec));

skip_wrmsr_gp:
	GUEST_SYNC(0);
}

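/*
 * Verify that writing the testcase's reserved value is handled correctly,
 * i.e. #GPs unless the CPU silently drops the offending bits.
 */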
static void guest_test_reserved_val(const struct kvm_msr *msr)
{
	/* Skip reserved value checks as well; ignore_msrs is truly a mess. */
	if (ignore_unsupported_msrs)
		return;

	/*
	 * If the CPU will truncate the written value (e.g. SYSENTER on AMD),
	 * expect success and a truncated value, not #GP.
	 */
	if (!this_cpu_has(msr->feature) ||
	    msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
		u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);

		__GUEST_ASSERT(vec == GP_VECTOR,
			       "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
			       msr->index, msr->rsvd_val, ex_str(vec));
	} else {
		__wrmsr(msr->index, msr->rsvd_val);
		__wrmsr(msr->index, msr->reset_val);
	}
}

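/*
 * Guest entry point.  Loop forever; the host selects the current testcase by
 * updating the global 'idx' between testcases, and each GUEST_SYNC hands
 * control back to the host to verify and/or reset MSR state.
 */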
static void guest_main(void)
{
	for (;;) {
		const struct kvm_msr *msr = &msrs[READ_ONCE(idx)];

		if (this_cpu_has(msr->feature))
			guest_test_supported_msr(msr);
		else
			guest_test_unsupported_msr(msr);

		if (msr->rsvd_val)
			guest_test_reserved_val(msr);

		GUEST_SYNC(msr->reset_val);
	}
}

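/*
 * Host half of a sync point: verify that host userspace reads the same value
 * the guest observed (@guest_val) via vcpu_get_msr(), then write the MSR's
 * reset value via vcpu_set_msr() and verify it sticks.  MSRs whose feature
 * isn't supported by KVM are skipped.
 */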
static void host_test_msr(struct kvm_vcpu *vcpu, u64 guest_val)
{
	u64 reset_val = msrs[idx].reset_val;
	u32 msr = msrs[idx].index;
	u64 val;

	if (!kvm_cpu_has(msrs[idx].feature))
		return;

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == guest_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    guest_val, msr, val);

	vcpu_set_msr(vcpu, msr, reset_val);

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    reset_val, msr, val);
}

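/*
 * Run the vCPU until the next GUEST_SYNC, and process the sync point via
 * host_test_msr().
 */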
static void do_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			host_test_msr(vcpu, uc.args[1]);
			return;
		case UCALL_PRINTF:
			pr_info("%s", uc.buffer);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		case UCALL_DONE:
			TEST_FAIL("Unexpected UCALL_DONE");
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

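/* Run all vCPUs to their next sync point for the current testcase. */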
static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++)
		do_vcpu_run(vcpus[i]);
}

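/* MISC_ENABLE's reset value: PEBS and BTS are reported as unavailable. */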
#define MISC_ENABLES_RESET_VAL (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

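/*
 * Define the testcases, create the VM and vCPUs, and drive every vCPU through
 * every testcase.  Each testcase requires two rounds of vcpus_run(), as the
 * guest syncs with the host twice per MSR: once after the write test, and
 * once more at the end of the iteration.
 */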
static void test_msrs(void)
{
	const struct kvm_msr __msrs[] = {
		MSR_TEST_NON_ZERO(MSR_IA32_MISC_ENABLE,
				  MISC_ENABLES_RESET_VAL | MSR_IA32_MISC_ENABLE_FAST_STRING,
				  MSR_IA32_MISC_ENABLE_FAST_STRING, MISC_ENABLES_RESET_VAL, NONE),
		MSR_TEST_NON_ZERO(MSR_IA32_CR_PAT, 0x07070707, 0, 0x7040600070406, NONE),

		/*
		 * TSC_AUX is supported if RDTSCP *or* RDPID is supported.  Add
		 * an entry for each feature so that TSC_AUX doesn't exist for
		 * the "unsupported" vCPU, and obviously to test both cases.
		 */
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDTSCP, RDPID),
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDPID, RDTSCP),

		MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE),
		/*
		 * SYSENTER_{ESP,EIP} technically #GP on non-canonical writes
		 * on Intel, but KVM doesn't emulate that behavior on emulated
		 * writes, i.e. this test will observe different behavior if
		 * the MSR writes are handled by hardware vs. KVM.  KVM's
		 * behavior is intended (though far from ideal), so don't
		 * bother testing non-canonical values.
		 */
		MSR_TEST(MSR_IA32_SYSENTER_ESP, canonical_val, 0, NONE),
		MSR_TEST(MSR_IA32_SYSENTER_EIP, canonical_val, 0, NONE),

		MSR_TEST_CANONICAL(MSR_FS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_KERNEL_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_LSTAR, LM),
		MSR_TEST_CANONICAL(MSR_CSTAR, LM),
		MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, 0, LM),

		MSR_TEST2(MSR_IA32_S_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_S_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST2(MSR_IA32_U_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_U_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL0_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL0_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL1_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL1_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL2_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL2_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL3_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL3_SSP, canonical_val, canonical_val | 1, SHSTK),
	};

	const struct kvm_x86_cpu_feature feat_none = X86_FEATURE_NONE;
	const struct kvm_x86_cpu_feature feat_lm = X86_FEATURE_LM;

	/*
	 * Create three vCPUs, but run them on the same task, to validate KVM's
	 * context switching of MSR state.  Don't pin the task to a pCPU to
	 * also validate KVM's handling of cross-pCPU migration.  Use the full
	 * set of features for the first two vCPUs, but clear all features in
	 * the third vCPU in order to test both positive and negative paths.
	 */
	const int NR_VCPUS = 3;
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;

	kvm_static_assert(sizeof(__msrs) <= sizeof(msrs));
	kvm_static_assert(ARRAY_SIZE(__msrs) <= ARRAY_SIZE(msrs));
	memcpy(msrs, __msrs, sizeof(__msrs));

	ignore_unsupported_msrs = kvm_is_ignore_msrs();

	vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);

	sync_global_to_guest(vm, msrs);
	sync_global_to_guest(vm, ignore_unsupported_msrs);

	/*
	 * Clear features in the "unsupported features" vCPU.  This needs to be
	 * done before the first vCPU run as KVM's ABI is that guest CPUID is
	 * immutable once the vCPU has been run.
	 */
	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		/*
		 * Don't clear LM; selftests are 64-bit only, and KVM doesn't
		 * honor LM=0 for MSRs that are supposed to exist if and only
		 * if the vCPU is a 64-bit model.  Ditto for NONE; clearing a
		 * fake feature flag will result in false failures.
		 */
		if (memcmp(&msrs[idx].feature, &feat_lm, sizeof(feat_lm)) &&
		    memcmp(&msrs[idx].feature, &feat_none, sizeof(feat_none)))
			vcpu_clear_cpuid_feature(vcpus[2], msrs[idx].feature);
	}

	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		sync_global_to_guest(vm, idx);

		vcpus_run(vcpus, NR_VCPUS);
		vcpus_run(vcpus, NR_VCPUS);
	}

	kvm_vm_free(vm);
}

int main(void)
{
	test_msrs();
}