xref: /linux/tools/testing/selftests/kvm/x86/msrs_test.c (revision 256e3417065b2721f77bcd37331796b59483ef3b)
19c38ddb3SSean Christopherson // SPDX-License-Identifier: GPL-2.0-only
29c38ddb3SSean Christopherson #include <asm/msr-index.h>
39c38ddb3SSean Christopherson 
49c38ddb3SSean Christopherson #include <stdint.h>
59c38ddb3SSean Christopherson 
69c38ddb3SSean Christopherson #include "kvm_util.h"
79c38ddb3SSean Christopherson #include "processor.h"
89c38ddb3SSean Christopherson 
/*
 * Use HYPERVISOR as a stand-in "feature" for MSRs that are emulated
 * unconditionally, as the HYPERVISOR flag itself is emulated unconditionally.
 */
#define X86_FEATURE_NONE X86_FEATURE_HYPERVISOR
119c38ddb3SSean Christopherson 
/*
 * A single MSR (or KVM-defined register) test case: the register index, the
 * CPUID feature(s) that enumerate its existence, a value that should be
 * writable, a value that should fail to be written, and the expected value
 * at vCPU RESET.
 */
struct kvm_msr {
	/* Primary feature that enumerates the MSR's existence. */
	const struct kvm_x86_cpu_feature feature;
	/* Alternate/secondary feature, e.g. RDPID vs. RDTSCP for TSC_AUX. */
	const struct kvm_x86_cpu_feature feature2;
	/* Stringified register name, used in assertion messages. */
	const char *name;
	/* Expected value after vCPU RESET (restored by the host each pass). */
	const u64 reset_val;
	/* Value that is expected to be writable when the MSR is supported. */
	const u64 write_val;
	/* Value expected to be rejected; '0' means skip the reserved test. */
	const u64 rsvd_val;
	/* MSR index, or the KVM_REG_* encoding when is_kvm_defined is set. */
	const u32 index;
	/* True for KVM-defined regs, accessible only via KVM_{G,S}ET_ONE_REG. */
	const bool is_kvm_defined;
};
229c38ddb3SSean Christopherson 
/*
 * Base initializer for a kvm_msr entry.  The wrappers below handle
 * stringification of the register name and fill in common defaults.
 */
#define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2, is_kvm)	\
{									\
	.index = msr,							\
	.name = str,							\
	.write_val = val,						\
	.rsvd_val = rsvd,						\
	.reset_val = reset,						\
	.feature = X86_FEATURE_ ##feat,					\
	.feature2 = X86_FEATURE_ ##f2,					\
	.is_kvm_defined = is_kvm,					\
}

/* A "real" MSR whose existence is governed by a single feature. */
#define __MSR_TEST(msr, str, val, rsvd, reset, feat)			\
	____MSR_TEST(msr, str, val, rsvd, reset, feat, feat, false)

/* MSR with a non-zero RESET value, e.g. MISC_ENABLE and PAT. */
#define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat)			\
	__MSR_TEST(msr, #msr, val, rsvd, reset, feat)

/* Common case: MSR that resets to zero. */
#define MSR_TEST(msr, val, rsvd, feat)					\
	__MSR_TEST(msr, #msr, val, rsvd, 0, feat)

/* MSR whose existence is enumerated by either of two features. */
#define MSR_TEST2(msr, val, rsvd, feat, f2)				\
	____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2, false)
469c38ddb3SSean Christopherson 
/*
 * Note, use a page aligned value for the canonical value so that the value
 * is compatible with MSRs that use bits 11:0 for things other than addresses.
 */
static const u64 canonical_val = 0x123456789000ull;

/*
 * Arbitrary value with bits set in every byte, but not all bits set.  This is
 * also a non-canonical value, but that's coincidental (any 64-bit value with
 * an alternating 0s/1s pattern will be non-canonical).
 */
static const u64 u64_val = 0xaaaa5555aaaa5555ull;

/* MSR that holds a virtual address; a non-canonical write should fail. */
#define MSR_TEST_CANONICAL(msr, feat)					\
	__MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)

/*
 * KVM-defined register.  Note, the index is the KVM_REG_* encoding, not an
 * MSR index, and the register is accessible only via KVM_{G,S}ET_ONE_REG.
 */
#define MSR_TEST_KVM(msr, val, rsvd, feat)				\
	____MSR_TEST(KVM_REG_ ##msr, #msr, val, rsvd, 0, feat, feat, true)
653469fd20SSean Christopherson 
/*
 * The main struct must be scoped to a function due to the use of structures to
 * define features.  For the global structure, allocate enough space for the
 * foreseeable future without getting too ridiculous, to minimize maintenance
 * costs (bumping the array size every time an MSR is added is really annoying).
 */
static struct kvm_msr msrs[128];

/* Index of the MSR under test; synced to the guest before each iteration. */
static int idx;

/* Mirrors kvm_is_ignore_msrs(); when set, the #GP checks are skipped. */
static bool ignore_unsupported_msrs;
769c38ddb3SSean Christopherson 
/*
 * Adjust the value expected from RDMSR to account for CPU vendor quirks,
 * i.e. return the value the guest should actually read back after writing
 * @want to @msr.
 */
static u64 fixup_rdmsr_val(u32 msr, u64 want)
{
	/*
	 * AMD CPUs drop bits 63:32 on some MSRs that Intel CPUs support.  KVM
	 * is supposed to emulate that behavior based on guest vendor model
	 * (which is the same as the host vendor model for this test), so on
	 * AMD the affected MSRs are expected to read back truncated values.
	 */
	if (host_cpu_is_amd &&
	    (msr == MSR_IA32_SYSENTER_ESP ||
	     msr == MSR_IA32_SYSENTER_EIP ||
	     msr == MSR_TSC_AUX))
		return want & GENMASK_ULL(31, 0);

	return want;
}
969c38ddb3SSean Christopherson 
/* Read @msr in the guest; assert the read faults not, and yields @want. */
static void __rdmsr(u32 msr, u64 want)
{
	u64 actual;
	u8 vector;

	vector = rdmsr_safe(msr, &actual);
	__GUEST_ASSERT(!vector, "Unexpected %s on RDMSR(0x%x)",
		       ex_str(vector), msr);

	__GUEST_ASSERT(actual == want,
		       "Wanted 0x%lx from RDMSR(0x%x), got 0x%lx",
		       want, msr, actual);
}
1089c38ddb3SSean Christopherson 
/*
 * Write @val to @msr in the guest; assert the write succeeds and then verify
 * the value reads back as expected (modulo vendor truncation quirks).
 */
static void __wrmsr(u32 msr, u64 val)
{
	u8 vector = wrmsr_safe(msr, val);

	__GUEST_ASSERT(!vector, "Unexpected %s on WRMSR(0x%x, 0x%lx)",
		       ex_str(vector), msr, val);

	__rdmsr(msr, fixup_rdmsr_val(msr, val));
}
1189c38ddb3SSean Christopherson 
guest_test_supported_msr(const struct kvm_msr * msr)1199c38ddb3SSean Christopherson static void guest_test_supported_msr(const struct kvm_msr *msr)
1209c38ddb3SSean Christopherson {
1219c38ddb3SSean Christopherson 	__rdmsr(msr->index, msr->reset_val);
1229c38ddb3SSean Christopherson 	__wrmsr(msr->index, msr->write_val);
1239c38ddb3SSean Christopherson 	GUEST_SYNC(fixup_rdmsr_val(msr->index, msr->write_val));
1249c38ddb3SSean Christopherson 
1259c38ddb3SSean Christopherson 	__rdmsr(msr->index, msr->reset_val);
1269c38ddb3SSean Christopherson }
1279c38ddb3SSean Christopherson 
/*
 * Verify that an unsupported MSR #GPs on both RDMSR and WRMSR, modulo the
 * ignore_msrs and CET caveats below.  Always finishes with GUEST_SYNC(0) so
 * the guest stays in lockstep with the host's run loop.
 */
static void guest_test_unsupported_msr(const struct kvm_msr *msr)
{
	u64 val;
	u8 vec;

	/*
	 * KVM's ABI with respect to ignore_msrs is a mess and largely beyond
	 * repair, just skip the unsupported MSR tests.
	 */
	if (ignore_unsupported_msrs)
		goto skip_wrmsr_gp;

	/*
	 * {S,U}_CET exist if IBT or SHSTK is supported, but with bits that are
	 * writable only if their associated feature is supported.  Skip the
	 * RDMSR #GP test if the secondary feature is supported, but perform
	 * the WRMSR #GP test as the to-be-written value is tied to the primary
	 * feature.  For all other MSRs, simply do nothing.
	 */
	if (this_cpu_has(msr->feature2)) {
		if  (msr->index != MSR_IA32_U_CET &&
		     msr->index != MSR_IA32_S_CET)
			goto skip_wrmsr_gp;

		goto skip_rdmsr_gp;
	}

	vec = rdmsr_safe(msr->index, &val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on RDMSR(0x%x), got %s",
		       msr->index, ex_str(vec));

skip_rdmsr_gp:
	vec = wrmsr_safe(msr->index, msr->write_val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
		       msr->index, msr->write_val, ex_str(vec));

skip_wrmsr_gp:
	GUEST_SYNC(0);
}
1679c38ddb3SSean Christopherson 
/*
 * Verify that writing the MSR's reserved value fails with #GP, except when
 * the CPU legally truncates the value, in which case the write (and the
 * follow-up restore of the reset value) is expected to succeed.
 */
void guest_test_reserved_val(const struct kvm_msr *msr)
{
	/* Skip reserved value checks as well, ignore_msrs is truly a mess. */
	if (ignore_unsupported_msrs)
		return;

	/*
	 * If the CPU will truncate the written value (e.g. SYSENTER on AMD),
	 * expect success and a truncated value, not #GP.
	 */
	if (!this_cpu_has(msr->feature) ||
	    msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
		u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);

		__GUEST_ASSERT(vec == GP_VECTOR,
			       "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
			       msr->index, msr->rsvd_val, ex_str(vec));
	} else {
		__wrmsr(msr->index, msr->rsvd_val);
		__wrmsr(msr->index, msr->reset_val);
	}
}
1909c38ddb3SSean Christopherson 
/*
 * Guest entry point: endlessly test msrs[idx].  The host advances @idx and
 * re-enters the guest; each iteration ends in a GUEST_SYNC that hands the
 * observed value back to the host for verification.
 */
static void guest_main(void)
{
	for (;;) {
		/* READ_ONCE() as the host updates idx between iterations. */
		const struct kvm_msr *msr = &msrs[READ_ONCE(idx)];

		if (this_cpu_has(msr->feature))
			guest_test_supported_msr(msr);
		else
			guest_test_unsupported_msr(msr);

		/* rsvd_val == 0 means there's no reserved value to test. */
		if (msr->rsvd_val)
			guest_test_reserved_val(msr);

		GUEST_SYNC(msr->reset_val);
	}
}
2079c38ddb3SSean Christopherson 
/* Whether KVM_CAP_ONE_REG is available, and whether to use it for writes. */
static bool has_one_reg;
static bool use_one_reg;

/* Max number of regs KVM_GET_REG_LIST is expected to enumerate on x86. */
#define KVM_X86_MAX_NR_REGS	1
2123469fd20SSean Christopherson 
vcpu_has_reg(struct kvm_vcpu * vcpu,u64 reg)2133469fd20SSean Christopherson static bool vcpu_has_reg(struct kvm_vcpu *vcpu, u64 reg)
2143469fd20SSean Christopherson {
2153469fd20SSean Christopherson 	struct {
2163469fd20SSean Christopherson 		struct kvm_reg_list list;
2173469fd20SSean Christopherson 		u64 regs[KVM_X86_MAX_NR_REGS];
2183469fd20SSean Christopherson 	} regs = {};
2193469fd20SSean Christopherson 	int r, i;
2203469fd20SSean Christopherson 
2213469fd20SSean Christopherson 	/*
2223469fd20SSean Christopherson 	 * If KVM_GET_REG_LIST succeeds with n=0, i.e. there are no supported
2233469fd20SSean Christopherson 	 * regs, then the vCPU obviously doesn't support the reg.
2243469fd20SSean Christopherson 	 */
2253469fd20SSean Christopherson 	r = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &regs.list);
2263469fd20SSean Christopherson 	if (!r)
2273469fd20SSean Christopherson 		return false;
2283469fd20SSean Christopherson 
2293469fd20SSean Christopherson 	TEST_ASSERT_EQ(errno, E2BIG);
2303469fd20SSean Christopherson 
2313469fd20SSean Christopherson 	/*
2323469fd20SSean Christopherson 	 * KVM x86 is expected to support enumerating a relative small number
2333469fd20SSean Christopherson 	 * of regs.  The majority of registers supported by KVM_{G,S}ET_ONE_REG
2343469fd20SSean Christopherson 	 * are enumerated via other ioctls, e.g. KVM_GET_MSR_INDEX_LIST.  For
2353469fd20SSean Christopherson 	 * simplicity, hardcode the maximum number of regs and manually update
2363469fd20SSean Christopherson 	 * the test as necessary.
2373469fd20SSean Christopherson 	 */
2383469fd20SSean Christopherson 	TEST_ASSERT(regs.list.n <= KVM_X86_MAX_NR_REGS,
2393469fd20SSean Christopherson 		    "KVM reports %llu regs, test expects at most %u regs, stale test?",
2403469fd20SSean Christopherson 		    regs.list.n, KVM_X86_MAX_NR_REGS);
2413469fd20SSean Christopherson 
2423469fd20SSean Christopherson 	vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &regs.list);
2433469fd20SSean Christopherson 	for (i = 0; i < regs.list.n; i++) {
2443469fd20SSean Christopherson 		if (regs.regs[i] == reg)
2453469fd20SSean Christopherson 			return true;
2463469fd20SSean Christopherson 	}
2473469fd20SSean Christopherson 
2483469fd20SSean Christopherson 	return false;
2493469fd20SSean Christopherson }
2503469fd20SSean Christopherson 
/*
 * Test a KVM-defined register from the host via KVM_{G,S}ET_ONE_REG: verify
 * KVM_GET_REG_LIST agrees with guest CPUID, verify get/set round-trips, and
 * verify that writing the reserved value fails.
 */
static void host_test_kvm_reg(struct kvm_vcpu *vcpu)
{
	bool has_reg = vcpu_cpuid_has(vcpu, msrs[idx].feature);
	u64 reset_val = msrs[idx].reset_val;
	u64 write_val = msrs[idx].write_val;
	u64 rsvd_val = msrs[idx].rsvd_val;
	u32 reg = msrs[idx].index;
	u64 val;
	int r;

	/* KVM-defined regs are only reachable via ONE_REG. */
	if (!use_one_reg)
		return;

	TEST_ASSERT_EQ(vcpu_has_reg(vcpu, KVM_X86_REG_KVM(reg)), has_reg);

	if (!has_reg) {
		r = __vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg), &val);
		TEST_ASSERT(r && errno == EINVAL,
			    "Expected failure on get_reg(0x%x)", reg);
		/* When the reg is unsupported, even writing '0' should fail. */
		rsvd_val = 0;
		goto out;
	}

	val = vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg));
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
		    reset_val, reg, val);

	vcpu_set_reg(vcpu, KVM_X86_REG_KVM(reg), write_val);
	val = vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg));
	TEST_ASSERT(val == write_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
		    write_val, reg, val);

out:
	r = __vcpu_set_reg(vcpu, KVM_X86_REG_KVM(reg), rsvd_val);
	TEST_ASSERT(r, "Expected failure on set_reg(0x%x, 0x%lx)", reg, rsvd_val);
}
2873469fd20SSean Christopherson 
/*
 * Host side of one test iteration: verify KVM's view of the MSR matches the
 * value the guest observed (@guest_val), restore the reset value (via
 * ONE_REG when enabled), and re-verify via both get_msr and get_reg.
 */
static void host_test_msr(struct kvm_vcpu *vcpu, u64 guest_val)
{
	u64 reset_val = msrs[idx].reset_val;
	u32 msr = msrs[idx].index;
	u64 val;

	if (!kvm_cpu_has(msrs[idx].feature))
		return;

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == guest_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    guest_val, msr, val);

	/* Restore the reset value so the guest's final __rdmsr() passes. */
	if (use_one_reg)
		vcpu_set_reg(vcpu, KVM_X86_REG_MSR(msr), reset_val);
	else
		vcpu_set_msr(vcpu, msr, reset_val);

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    reset_val, msr, val);

	/* If ONE_REG is supported, verify it agrees with KVM_GET_MSRS too. */
	if (!has_one_reg)
		return;

	val = vcpu_get_reg(vcpu, KVM_X86_REG_MSR(msr));
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
		    reset_val, msr, val);
}
3179c38ddb3SSean Christopherson 
/*
 * Run @vcpu until it completes one test iteration, i.e. until it reaches the
 * GUEST_SYNC at the end of guest_main()'s loop body.
 */
static void do_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			/* args[1] is the value the guest read after writing. */
			host_test_msr(vcpu, uc.args[1]);
			return;
		case UCALL_PRINTF:
			pr_info("%s", uc.buffer);
			break;
		case UCALL_ABORT:
			/* Reports the guest assert and terminates the test. */
			REPORT_GUEST_ASSERT(uc);
		case UCALL_DONE:
			/* The guest loops forever; it should never be "done". */
			TEST_FAIL("Unexpected UCALL_DONE");
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}
3419c38ddb3SSean Christopherson 
/* Run every vCPU through one test iteration for the current MSR. */
static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)
{
	int n = 0;

	while (n < NR_VCPUS)
		do_vcpu_run(vcpus[n++]);
}
3499c38ddb3SSean Christopherson 
/* Expected MISC_ENABLE reset value: PEBS and BTS reported as unavailable. */
#define MISC_ENABLES_RESET_VAL (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
3519c38ddb3SSean Christopherson 
/*
 * Main test body: build the table of MSR test cases, create the vCPUs, and
 * iterate over every MSR, running the guest-side checks on each vCPU twice
 * per MSR.
 */
static void test_msrs(void)
{
	const struct kvm_msr __msrs[] = {
		MSR_TEST_NON_ZERO(MSR_IA32_MISC_ENABLE,
				  MISC_ENABLES_RESET_VAL | MSR_IA32_MISC_ENABLE_FAST_STRING,
				  MSR_IA32_MISC_ENABLE_FAST_STRING, MISC_ENABLES_RESET_VAL, NONE),
		MSR_TEST_NON_ZERO(MSR_IA32_CR_PAT, 0x07070707, 0, 0x7040600070406, NONE),

		/*
		 * TSC_AUX is supported if RDTSCP *or* RDPID is supported.  Add
		 * entries for each feature so that TSC_AUX doesn't exist for
		 * the "unsupported" vCPU, and obviously to test both cases.
		 */
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDTSCP, RDPID),
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDPID, RDTSCP),

		MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE),
		/*
		 * SYSENTER_{ESP,EIP} are technically non-canonical on Intel,
		 * but KVM doesn't emulate that behavior on emulated writes,
		 * i.e. this test will observe different behavior if the MSR
		 * writes are handled by hardware vs. KVM.  KVM's behavior is
		 * intended (though far from ideal), so don't bother testing
		 * non-canonical values.
		 */
		MSR_TEST(MSR_IA32_SYSENTER_ESP, canonical_val, 0, NONE),
		MSR_TEST(MSR_IA32_SYSENTER_EIP, canonical_val, 0, NONE),

		MSR_TEST_CANONICAL(MSR_FS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_KERNEL_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_LSTAR, LM),
		MSR_TEST_CANONICAL(MSR_CSTAR, LM),
		MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, 0, LM),

		MSR_TEST2(MSR_IA32_S_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_S_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST2(MSR_IA32_U_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_U_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL0_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL0_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL1_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL1_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL2_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL2_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL3_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL3_SSP, canonical_val, canonical_val | 1, SHSTK),

		MSR_TEST_KVM(GUEST_SSP, canonical_val, NONCANONICAL, SHSTK),
	};

	const struct kvm_x86_cpu_feature feat_none = X86_FEATURE_NONE;
	const struct kvm_x86_cpu_feature feat_lm = X86_FEATURE_LM;

	/*
	 * Create three vCPUs, but run them on the same task, to validate KVM's
	 * context switching of MSR state.  Don't pin the task to a pCPU to
	 * also validate KVM's handling of cross-pCPU migration.  Use the full
	 * set of features for the first two vCPUs, but clear all features in
	 * third vCPU in order to test both positive and negative paths.
	 */
	const int NR_VCPUS = 3;
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;
	int i;

	kvm_static_assert(sizeof(__msrs) <= sizeof(msrs));
	kvm_static_assert(ARRAY_SIZE(__msrs) <= ARRAY_SIZE(msrs));
	memcpy(msrs, __msrs, sizeof(__msrs));

	ignore_unsupported_msrs = kvm_is_ignore_msrs();

	vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);

	sync_global_to_guest(vm, msrs);
	sync_global_to_guest(vm, ignore_unsupported_msrs);

	/*
	 * Clear features in the "unsupported features" vCPU.  This needs to be
	 * done before the first vCPU run as KVM's ABI is that guest CPUID is
	 * immutable once the vCPU has been run.
	 */
	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		/*
		 * Don't clear LM; selftests are 64-bit only, and KVM doesn't
		 * honor LM=0 for MSRs that are supposed to exist if and only
		 * if the vCPU is a 64-bit model.  Ditto for NONE; clearing a
		 * fake feature flag will result in false failures.
		 */
		if (memcmp(&msrs[idx].feature, &feat_lm, sizeof(feat_lm)) &&
		    memcmp(&msrs[idx].feature, &feat_none, sizeof(feat_none)))
			vcpu_clear_cpuid_feature(vcpus[2], msrs[idx].feature);
	}

	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		struct kvm_msr *msr = &msrs[idx];

		/* KVM-defined regs are tested purely from the host. */
		if (msr->is_kvm_defined) {
			for (i = 0; i < NR_VCPUS; i++)
				host_test_kvm_reg(vcpus[i]);
			continue;
		}

		/*
		 * Verify KVM_GET_SUPPORTED_CPUID and KVM_GET_MSR_INDEX_LIST
		 * are consistent with respect to MSRs whose existence is
		 * enumerated via CPUID.  Skip the check for FS/GS.base MSRs,
		 * as they aren't reported in the save/restore list since their
		 * state is managed via SREGS.
		 */
		TEST_ASSERT(msr->index == MSR_FS_BASE || msr->index == MSR_GS_BASE ||
			    kvm_msr_is_in_save_restore_list(msr->index) ==
			    (kvm_cpu_has(msr->feature) || kvm_cpu_has(msr->feature2)),
			    "%s %s in save/restore list, but %s according to CPUID", msr->name,
			    kvm_msr_is_in_save_restore_list(msr->index) ? "is" : "isn't",
			    (kvm_cpu_has(msr->feature) || kvm_cpu_has(msr->feature2)) ?
			    "supported" : "unsupported");

		sync_global_to_guest(vm, idx);

		/* Run twice, e.g. to verify the host-side restore of reset_val. */
		vcpus_run(vcpus, NR_VCPUS);
		vcpus_run(vcpus, NR_VCPUS);
	}

	kvm_vm_free(vm);
}
4789c38ddb3SSean Christopherson 
main(void)4799c38ddb3SSean Christopherson int main(void)
4809c38ddb3SSean Christopherson {
48180c2b6d8SSean Christopherson 	has_one_reg = kvm_has_cap(KVM_CAP_ONE_REG);
48280c2b6d8SSean Christopherson 
4839c38ddb3SSean Christopherson 	test_msrs();
48480c2b6d8SSean Christopherson 
48580c2b6d8SSean Christopherson 	if (has_one_reg) {
48680c2b6d8SSean Christopherson 		use_one_reg = true;
48780c2b6d8SSean Christopherson 		test_msrs();
48880c2b6d8SSean Christopherson 	}
4899c38ddb3SSean Christopherson }
490