// SPDX-License-Identifier: GPL-2.0-only
#include <asm/msr-index.h>

#include <stdint.h>

#include "kvm_util.h"
#include "processor.h"

/*
 * Use HYPERVISOR for MSRs that are emulated unconditionally, as the
 * HYPERVISOR feature is itself always set for KVM guests.
 */
#define X86_FEATURE_NONE X86_FEATURE_HYPERVISOR

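/*
 * Description of a single MSR to test.  @feature gates whether the MSR is
 * expected to exist; @feature2 is an alternate feature that also provides the
 * MSR (identical to @feature when there is no alternate).
 */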
struct kvm_msr {
	const struct kvm_x86_cpu_feature feature;
	const struct kvm_x86_cpu_feature feature2;
	const char *name;
	const u64 reset_val;
	const u64 write_val;
	const u64 rsvd_val;
	const u32 index;
};

#define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2)		\
{									\
	.index = msr,							\
	.name = str,							\
	.write_val = val,						\
	.rsvd_val = rsvd,						\
	.reset_val = reset,						\
	.feature = X86_FEATURE_ ##feat,					\
	.feature2 = X86_FEATURE_ ##f2,					\
}

#define __MSR_TEST(msr, str, val, rsvd, reset, feat)			\
	____MSR_TEST(msr, str, val, rsvd, reset, feat, feat)

#define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat)			\
	__MSR_TEST(msr, #msr, val, rsvd, reset, feat)

#define MSR_TEST(msr, val, rsvd, feat)					\
	__MSR_TEST(msr, #msr, val, rsvd, 0, feat)

#define MSR_TEST2(msr, val, rsvd, feat, f2)				\
	____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2)

/*
 * Note, use a page aligned value for the canonical value so that the value
 * is compatible with MSRs that use bits 11:0 for things other than addresses.
 */
static const u64 canonical_val = 0x123456789000ull;

/*
 * Arbitrary value with bits set in every byte, but not all bits set.  This is
 * also a non-canonical value, but that's coincidental (any 64-bit value with
 * an alternating 0s/1s pattern will be non-canonical).
 */
static const u64 u64_val = 0xaaaa5555aaaa5555ull;

#define MSR_TEST_CANONICAL(msr, feat)					\
	__MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)

/*
 * The main array of MSRs to test must be scoped to a function due to the use
 * of structures to define features.  For this global array, allocate enough
 * space for the foreseeable future without getting too ridiculous, to minimize
 * maintenance costs (bumping the array size every time an MSR is added is
 * really annoying).
 */
static struct kvm_msr msrs[128];
static int idx;

static bool ignore_unsupported_msrs;

static u64 fixup_rdmsr_val(u32 msr, u64 want)
{
	/*
	 * AMD CPUs drop bits 63:32 on some MSRs that Intel CPUs support.  KVM
	 * is supposed to emulate that behavior based on guest vendor model
	 * (which is the same as the host vendor model for this test).
	 */
	if (!host_cpu_is_amd)
		return want;

	switch (msr) {
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
	case MSR_TSC_AUX:
		return want & GENMASK_ULL(31, 0);
	default:
		return want;
	}
}

static void __rdmsr(u32 msr, u64 want)
{
	u64 val;
	u8 vec;

	vec = rdmsr_safe(msr, &val);
	__GUEST_ASSERT(!vec, "Unexpected %s on RDMSR(0x%x)", ex_str(vec), msr);

	__GUEST_ASSERT(val == want, "Wanted 0x%lx from RDMSR(0x%x), got 0x%lx",
		       want, msr, val);
}

static void __wrmsr(u32 msr, u64 val)
{
	u8 vec;

	vec = wrmsr_safe(msr, val);
	__GUEST_ASSERT(!vec, "Unexpected %s on WRMSR(0x%x, 0x%lx)",
		       ex_str(vec), msr, val);
	__rdmsr(msr, fixup_rdmsr_val(msr, val));
}

static void guest_test_supported_msr(const struct kvm_msr *msr)
{
	__rdmsr(msr->index, msr->reset_val);
	__wrmsr(msr->index, msr->write_val);
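	/*
	 * Hand the written value (as it will be read back, i.e. potentially
	 * truncated) to the host so that it can verify vcpu_get_msr() and
	 * then restore the MSR to its reset value via vcpu_set_msr().
	 */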
	GUEST_SYNC(fixup_rdmsr_val(msr->index, msr->write_val));

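	/* Verify the guest sees the reset value that the host restored. */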
	__rdmsr(msr->index, msr->reset_val);
}

static void guest_test_unsupported_msr(const struct kvm_msr *msr)
{
	u64 val;
	u8 vec;

	/*
	 * KVM's ABI with respect to ignore_msrs is a mess and largely beyond
	 * repair, just skip the unsupported MSR tests.
	 */
	if (ignore_unsupported_msrs)
		goto skip_wrmsr_gp;

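	/*
	 * Don't expect #GP if the MSR is provided by the alternate feature,
	 * e.g. TSC_AUX exists if either RDTSCP or RDPID is supported.
	 */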
	if (this_cpu_has(msr->feature2))
		goto skip_wrmsr_gp;

	vec = rdmsr_safe(msr->index, &val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on RDMSR(0x%x), got %s",
		       msr->index, ex_str(vec));

	vec = wrmsr_safe(msr->index, msr->write_val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
		       msr->index, msr->write_val, ex_str(vec));

skip_wrmsr_gp:
	GUEST_SYNC(0);
}

static void guest_test_reserved_val(const struct kvm_msr *msr)
{
	/* Skip reserved value checks as well, ignore_msrs is truly a mess. */
	if (ignore_unsupported_msrs)
		return;

	/*
	 * If the CPU will truncate the written value (e.g. SYSENTER on AMD),
	 * expect success and a truncated value, not #GP.
	 */
	if (!this_cpu_has(msr->feature) ||
	    msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
		u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);

		__GUEST_ASSERT(vec == GP_VECTOR,
			       "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
			       msr->index, msr->rsvd_val, ex_str(vec));
	} else {
		__wrmsr(msr->index, msr->rsvd_val);
		__wrmsr(msr->index, msr->reset_val);
	}
}

static void guest_main(void)
{
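	/*
	 * Loop forever; the host chooses the MSR to test by updating 'idx'
	 * and syncing it into the guest before re-running the vCPU.
	 */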
	for (;;) {
		const struct kvm_msr *msr = &msrs[READ_ONCE(idx)];

		if (this_cpu_has(msr->feature))
			guest_test_supported_msr(msr);
		else
			guest_test_unsupported_msr(msr);

		if (msr->rsvd_val)
			guest_test_reserved_val(msr);

		GUEST_SYNC(msr->reset_val);
	}
}

static void host_test_msr(struct kvm_vcpu *vcpu, u64 guest_val)
{
	u64 reset_val = msrs[idx].reset_val;
	u32 msr = msrs[idx].index;
	u64 val;

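	/*
	 * If KVM doesn't support the MSR's feature, there is no MSR state for
	 * the host to get/set and verify.
	 */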
	if (!kvm_cpu_has(msrs[idx].feature))
		return;

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == guest_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    guest_val, msr, val);

	vcpu_set_msr(vcpu, msr, reset_val);

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    reset_val, msr, val);
}

static void do_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			host_test_msr(vcpu, uc.args[1]);
			return;
		case UCALL_PRINTF:
			pr_info("%s", uc.buffer);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		case UCALL_DONE:
			TEST_FAIL("Unexpected UCALL_DONE");
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++)
		do_vcpu_run(vcpus[i]);
}

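/* The value the test expects MSR_IA32_MISC_ENABLE to hold at vCPU RESET. */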
#define MISC_ENABLES_RESET_VAL (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

static void test_msrs(void)
{
	const struct kvm_msr __msrs[] = {
		MSR_TEST_NON_ZERO(MSR_IA32_MISC_ENABLE,
				  MISC_ENABLES_RESET_VAL | MSR_IA32_MISC_ENABLE_FAST_STRING,
				  MSR_IA32_MISC_ENABLE_FAST_STRING, MISC_ENABLES_RESET_VAL, NONE),
		MSR_TEST_NON_ZERO(MSR_IA32_CR_PAT, 0x07070707, 0, 0x7040600070406, NONE),

		/*
		 * TSC_AUX is supported if RDTSCP *or* RDPID is supported.  Add
		 * an entry for each feature so that TSC_AUX doesn't exist for
		 * the "unsupported" vCPU, and obviously to test both cases.
		 */
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDTSCP, RDPID),
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDPID, RDTSCP),

		MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE),
		/*
		 * Non-canonical values for SYSENTER_{ESP,EIP} technically #GP
		 * on Intel, but KVM doesn't emulate that behavior on emulated
		 * writes, i.e. this test would observe different behavior
		 * depending on whether the MSR writes are handled by hardware
		 * or by KVM.  KVM's behavior is intended (though far from
		 * ideal), so don't bother testing non-canonical values.
		 */
		MSR_TEST(MSR_IA32_SYSENTER_ESP, canonical_val, 0, NONE),
		MSR_TEST(MSR_IA32_SYSENTER_EIP, canonical_val, 0, NONE),

		MSR_TEST_CANONICAL(MSR_FS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_KERNEL_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_LSTAR, LM),
		MSR_TEST_CANONICAL(MSR_CSTAR, LM),
		MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, 0, LM),

		MSR_TEST_CANONICAL(MSR_IA32_PL0_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL0_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL1_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL1_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL2_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL2_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL3_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL3_SSP, canonical_val, canonical_val | 1, SHSTK),
	};

	/*
	 * Create two vCPUs, but run them on the same task, to validate KVM's
	 * context switching of MSR state.  Don't pin the task to a pCPU to
	 * also validate KVM's handling of cross-pCPU migration.
	 */
	const int NR_VCPUS = 2;
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;

	kvm_static_assert(sizeof(__msrs) <= sizeof(msrs));
	kvm_static_assert(ARRAY_SIZE(__msrs) <= ARRAY_SIZE(msrs));
	memcpy(msrs, __msrs, sizeof(__msrs));

	ignore_unsupported_msrs = kvm_is_ignore_msrs();

	vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);

	sync_global_to_guest(vm, msrs);
	sync_global_to_guest(vm, ignore_unsupported_msrs);

	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		sync_global_to_guest(vm, idx);

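		/*
		 * Each MSR triggers two GUEST_SYNCs per vCPU, one from the
		 * supported/unsupported test and one at the end of the
		 * guest's loop iteration, hence two rounds of runs.
		 */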
		vcpus_run(vcpus, NR_VCPUS);
		vcpus_run(vcpus, NR_VCPUS);
	}

	kvm_vm_free(vm);
}

int main(void)
{
	test_msrs();
}