xref: /linux/tools/testing/selftests/kvm/x86/msrs_test.c (revision 256e3417065b2721f77bcd37331796b59483ef3b)
// SPDX-License-Identifier: GPL-2.0-only
#include <asm/msr-index.h>

#include <stdint.h>

#include "kvm_util.h"
#include "processor.h"

/* Use HYPERVISOR for MSRs that are emulated unconditionally (as HYPERVISOR itself is). */
#define X86_FEATURE_NONE X86_FEATURE_HYPERVISOR

struct kvm_msr {
	const struct kvm_x86_cpu_feature feature;
	const struct kvm_x86_cpu_feature feature2;
	const char *name;
	const u64 reset_val;
	const u64 write_val;
	const u64 rsvd_val;
	const u32 index;
	const bool is_kvm_defined;
};

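/*
 * Helpers to define kvm_msr entries.  The MSR_TEST* wrappers below stringify
 * the MSR name and fill in common defaults (zero reset value, feature2 equal
 * to the primary feature, not KVM-defined).
 */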
#define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2, is_kvm)	\
{									\
	.index = msr,							\
	.name = str,							\
	.write_val = val,						\
	.rsvd_val = rsvd,						\
	.reset_val = reset,						\
	.feature = X86_FEATURE_ ##feat,					\
	.feature2 = X86_FEATURE_ ##f2,					\
	.is_kvm_defined = is_kvm,					\
}

#define __MSR_TEST(msr, str, val, rsvd, reset, feat)			\
	____MSR_TEST(msr, str, val, rsvd, reset, feat, feat, false)

#define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat)			\
	__MSR_TEST(msr, #msr, val, rsvd, reset, feat)

#define MSR_TEST(msr, val, rsvd, feat)					\
	__MSR_TEST(msr, #msr, val, rsvd, 0, feat)

#define MSR_TEST2(msr, val, rsvd, feat, f2)				\
	____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2, false)

/*
 * Note, use a page-aligned value for the canonical value so that the value
 * is compatible with MSRs that use bits 11:0 for things other than addresses.
 */
static const u64 canonical_val = 0x123456789000ull;

/*
 * Arbitrary value with bits set in every byte, but not all bits set.  This is
 * also a non-canonical value, but that's coincidental (any 64-bit value with
 * an alternating 0s/1s pattern will be non-canonical).
 */
static const u64 u64_val = 0xaaaa5555aaaa5555ull;

#define MSR_TEST_CANONICAL(msr, feat)					\
	__MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)

#define MSR_TEST_KVM(msr, val, rsvd, feat)				\
	____MSR_TEST(KVM_REG_ ##msr, #msr, val, rsvd, 0, feat, feat, true)

/*
 * The main struct must be scoped to a function due to the use of structures to
 * define features.  For the global structure, allocate enough space for the
 * foreseeable future without getting too ridiculous, to minimize maintenance
 * costs (bumping the array size every time an MSR is added is really annoying).
 */
static struct kvm_msr msrs[128];
static int idx;

static bool ignore_unsupported_msrs;

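/* Adjust the value expected back from RDMSR for CPUs that drop upper bits. */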
static u64 fixup_rdmsr_val(u32 msr, u64 want)
{
	/*
	 * AMD CPUs drop bits 63:32 on some MSRs that Intel CPUs support.  KVM
	 * is supposed to emulate that behavior based on guest vendor model
	 * (which is the same as the host vendor model for this test).
	 */
	if (!host_cpu_is_amd)
		return want;

	switch (msr) {
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
	case MSR_TSC_AUX:
		return want & GENMASK_ULL(31, 0);
	default:
		return want;
	}
}

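/* Read @msr and assert that the read succeeds and returns @want. */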
static void __rdmsr(u32 msr, u64 want)
{
	u64 val;
	u8 vec;

	vec = rdmsr_safe(msr, &val);
	__GUEST_ASSERT(!vec, "Unexpected %s on RDMSR(0x%x)", ex_str(vec), msr);

	__GUEST_ASSERT(val == want, "Wanted 0x%lx from RDMSR(0x%x), got 0x%lx",
		       want, msr, val);
}

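/* Write @val to @msr, then verify that the (fixed up) value reads back. */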
static void __wrmsr(u32 msr, u64 val)
{
	u8 vec;

	vec = wrmsr_safe(msr, val);
	__GUEST_ASSERT(!vec, "Unexpected %s on WRMSR(0x%x, 0x%lx)",
		       ex_str(vec), msr, val);
	__rdmsr(msr, fixup_rdmsr_val(msr, val));
}

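/*
 * Verify a supported MSR: check the reset value, write the test value, let
 * the host verify and restore the MSR, then check the reset value again.
 */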
static void guest_test_supported_msr(const struct kvm_msr *msr)
{
	__rdmsr(msr->index, msr->reset_val);
	__wrmsr(msr->index, msr->write_val);
	GUEST_SYNC(fixup_rdmsr_val(msr->index, msr->write_val));

	__rdmsr(msr->index, msr->reset_val);
}

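/* Verify that accessing an unsupported MSR #GPs (modulo the caveats below). */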
static void guest_test_unsupported_msr(const struct kvm_msr *msr)
{
	u64 val;
	u8 vec;

	/*
	 * KVM's ABI with respect to ignore_msrs is a mess and largely beyond
	 * repair, just skip the unsupported MSR tests.
	 */
	if (ignore_unsupported_msrs)
		goto skip_wrmsr_gp;

	/*
	 * {S,U}_CET exist if IBT or SHSTK is supported, but with bits that are
	 * writable only if their associated feature is supported.  Skip the
	 * RDMSR #GP test if the secondary feature is supported, but perform
	 * the WRMSR #GP test as the to-be-written value is tied to the primary
	 * feature.  For all other MSRs, simply do nothing.
	 */
	if (this_cpu_has(msr->feature2)) {
		if (msr->index != MSR_IA32_U_CET &&
		    msr->index != MSR_IA32_S_CET)
			goto skip_wrmsr_gp;

		goto skip_rdmsr_gp;
	}

	vec = rdmsr_safe(msr->index, &val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on RDMSR(0x%x), got %s",
		       msr->index, ex_str(vec));

skip_rdmsr_gp:
	vec = wrmsr_safe(msr->index, msr->write_val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
		       msr->index, msr->write_val, ex_str(vec));

skip_wrmsr_gp:
	GUEST_SYNC(0);
}

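/*
 * Verify that writing the MSR's reserved value either #GPs or, for MSRs whose
 * value the CPU truncates, succeeds with the truncated value.
 */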
void guest_test_reserved_val(const struct kvm_msr *msr)
{
	/* Skip the reserved value checks as well; ignore_msrs truly is a mess. */
	if (ignore_unsupported_msrs)
		return;

	/*
	 * If the CPU will truncate the written value (e.g. SYSENTER on AMD),
	 * expect success and a truncated value, not #GP.
	 */
	if (!this_cpu_has(msr->feature) ||
	    msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
		u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);

		__GUEST_ASSERT(vec == GP_VECTOR,
			       "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
			       msr->index, msr->rsvd_val, ex_str(vec));
	} else {
		__wrmsr(msr->index, msr->rsvd_val);
		__wrmsr(msr->index, msr->reset_val);
	}
}

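/*
 * Guest entry point: loop forever, testing the MSR selected by the host via
 * 'idx' and synchronizing with the host after each stage.
 */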
static void guest_main(void)
{
	for (;;) {
		const struct kvm_msr *msr = &msrs[READ_ONCE(idx)];

		if (this_cpu_has(msr->feature))
			guest_test_supported_msr(msr);
		else
			guest_test_unsupported_msr(msr);

		if (msr->rsvd_val)
			guest_test_reserved_val(msr);

		GUEST_SYNC(msr->reset_val);
	}
}

static bool has_one_reg;
static bool use_one_reg;

#define KVM_X86_MAX_NR_REGS	1

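/* Check if @reg is enumerated by KVM_GET_REG_LIST for the given vCPU. */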
static bool vcpu_has_reg(struct kvm_vcpu *vcpu, u64 reg)
{
	struct {
		struct kvm_reg_list list;
		u64 regs[KVM_X86_MAX_NR_REGS];
	} regs = {};
	int r, i;

	/*
	 * If KVM_GET_REG_LIST succeeds with n=0, i.e. there are no supported
	 * regs, then the vCPU obviously doesn't support the reg.
	 */
	r = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &regs.list);
	if (!r)
		return false;

	TEST_ASSERT_EQ(errno, E2BIG);

	/*
	 * KVM x86 is expected to support enumerating a relatively small number
	 * of regs.  The majority of registers supported by KVM_{G,S}ET_ONE_REG
	 * are enumerated via other ioctls, e.g. KVM_GET_MSR_INDEX_LIST.  For
	 * simplicity, hardcode the maximum number of regs and manually update
	 * the test as necessary.
	 */
	TEST_ASSERT(regs.list.n <= KVM_X86_MAX_NR_REGS,
		    "KVM reports %llu regs, test expects at most %u regs, stale test?",
		    regs.list.n, KVM_X86_MAX_NR_REGS);

	vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &regs.list);
	for (i = 0; i < regs.list.n; i++) {
		if (regs.regs[i] == reg)
			return true;
	}

	return false;
}

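/* Exercise KVM_{G,S}ET_ONE_REG for a KVM-defined register, e.g. GUEST_SSP. */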
static void host_test_kvm_reg(struct kvm_vcpu *vcpu)
{
	bool has_reg = vcpu_cpuid_has(vcpu, msrs[idx].feature);
	u64 reset_val = msrs[idx].reset_val;
	u64 write_val = msrs[idx].write_val;
	u64 rsvd_val = msrs[idx].rsvd_val;
	u32 reg = msrs[idx].index;
	u64 val;
	int r;

	if (!use_one_reg)
		return;

	TEST_ASSERT_EQ(vcpu_has_reg(vcpu, KVM_X86_REG_KVM(reg)), has_reg);

	if (!has_reg) {
		r = __vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg), &val);
		TEST_ASSERT(r && errno == EINVAL,
			    "Expected failure on get_reg(0x%x)", reg);
		rsvd_val = 0;
		goto out;
	}

	val = vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg));
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
		    reset_val, reg, val);

	vcpu_set_reg(vcpu, KVM_X86_REG_KVM(reg), write_val);
	val = vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg));
	TEST_ASSERT(val == write_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
		    write_val, reg, val);

out:
	r = __vcpu_set_reg(vcpu, KVM_X86_REG_KVM(reg), rsvd_val);
	TEST_ASSERT(r, "Expected failure on set_reg(0x%x, 0x%lx)", reg, rsvd_val);
}

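/*
 * Verify the guest-visible MSR value from the host, restore the reset value,
 * and confirm it reads back via both the MSR ioctls and KVM_GET_ONE_REG.
 */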
static void host_test_msr(struct kvm_vcpu *vcpu, u64 guest_val)
{
	u64 reset_val = msrs[idx].reset_val;
	u32 msr = msrs[idx].index;
	u64 val;

	if (!kvm_cpu_has(msrs[idx].feature))
		return;

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == guest_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    guest_val, msr, val);

	if (use_one_reg)
		vcpu_set_reg(vcpu, KVM_X86_REG_MSR(msr), reset_val);
	else
		vcpu_set_msr(vcpu, msr, reset_val);

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    reset_val, msr, val);

	if (!has_one_reg)
		return;

	val = vcpu_get_reg(vcpu, KVM_X86_REG_MSR(msr));
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
		    reset_val, msr, val);
}

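/* Run the vCPU until the next GUEST_SYNC, then let the host check the MSR. */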
static void do_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			host_test_msr(vcpu, uc.args[1]);
			return;
		case UCALL_PRINTF:
			pr_info("%s", uc.buffer);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		case UCALL_DONE:
			TEST_FAIL("Unexpected UCALL_DONE");
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++)
		do_vcpu_run(vcpus[i]);
}

#define MISC_ENABLES_RESET_VAL (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

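/* Define the MSRs under test and run the full test sequence on all vCPUs. */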
static void test_msrs(void)
{
	const struct kvm_msr __msrs[] = {
		MSR_TEST_NON_ZERO(MSR_IA32_MISC_ENABLE,
				  MISC_ENABLES_RESET_VAL | MSR_IA32_MISC_ENABLE_FAST_STRING,
				  MSR_IA32_MISC_ENABLE_FAST_STRING, MISC_ENABLES_RESET_VAL, NONE),
		MSR_TEST_NON_ZERO(MSR_IA32_CR_PAT, 0x07070707, 0, 0x7040600070406, NONE),

		/*
		 * TSC_AUX is supported if RDTSCP *or* RDPID is supported.  Add
		 * an entry for each feature so that TSC_AUX doesn't exist for
		 * the "unsupported" vCPU, and obviously to test both cases.
		 */
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDTSCP, RDPID),
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDPID, RDTSCP),

		MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE),
		/*
		 * Writing a non-canonical value to SYSENTER_{ESP,EIP}
		 * technically #GPs on Intel, but KVM doesn't emulate that
		 * behavior on emulated writes, i.e. this test would observe
		 * different behavior depending on whether the MSR writes are
		 * handled by hardware or by KVM.  KVM's behavior is intended
		 * (though far from ideal), so don't bother testing
		 * non-canonical values.
		 */
		MSR_TEST(MSR_IA32_SYSENTER_ESP, canonical_val, 0, NONE),
		MSR_TEST(MSR_IA32_SYSENTER_EIP, canonical_val, 0, NONE),

		MSR_TEST_CANONICAL(MSR_FS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_KERNEL_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_LSTAR, LM),
		MSR_TEST_CANONICAL(MSR_CSTAR, LM),
		MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, 0, LM),

		MSR_TEST2(MSR_IA32_S_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_S_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST2(MSR_IA32_U_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_U_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL0_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL0_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL1_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL1_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL2_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL2_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL3_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL3_SSP, canonical_val, canonical_val | 1, SHSTK),

		MSR_TEST_KVM(GUEST_SSP, canonical_val, NONCANONICAL, SHSTK),
	};

	const struct kvm_x86_cpu_feature feat_none = X86_FEATURE_NONE;
	const struct kvm_x86_cpu_feature feat_lm = X86_FEATURE_LM;

	/*
	 * Create three vCPUs, but run them on the same task, to validate KVM's
	 * context switching of MSR state.  Don't pin the task to a pCPU to
	 * also validate KVM's handling of cross-pCPU migration.  Use the full
	 * set of features for the first two vCPUs, but clear all features in
	 * the third vCPU in order to test both positive and negative paths.
	 */
	const int NR_VCPUS = 3;
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;
	int i;

	kvm_static_assert(sizeof(__msrs) <= sizeof(msrs));
	kvm_static_assert(ARRAY_SIZE(__msrs) <= ARRAY_SIZE(msrs));
	memcpy(msrs, __msrs, sizeof(__msrs));

	ignore_unsupported_msrs = kvm_is_ignore_msrs();

	vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);

	sync_global_to_guest(vm, msrs);
	sync_global_to_guest(vm, ignore_unsupported_msrs);

	/*
	 * Clear features in the "unsupported features" vCPU.  This needs to be
	 * done before the first vCPU run as KVM's ABI is that guest CPUID is
	 * immutable once the vCPU has been run.
	 */
	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		/*
		 * Don't clear LM; selftests are 64-bit only, and KVM doesn't
		 * honor LM=0 for MSRs that are supposed to exist if and only
		 * if the vCPU is a 64-bit model.  Ditto for NONE; clearing a
		 * fake feature flag will result in false failures.
		 */
		if (memcmp(&msrs[idx].feature, &feat_lm, sizeof(feat_lm)) &&
		    memcmp(&msrs[idx].feature, &feat_none, sizeof(feat_none)))
			vcpu_clear_cpuid_feature(vcpus[2], msrs[idx].feature);
	}

	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		struct kvm_msr *msr = &msrs[idx];

		if (msr->is_kvm_defined) {
			for (i = 0; i < NR_VCPUS; i++)
				host_test_kvm_reg(vcpus[i]);
			continue;
		}

		/*
		 * Verify KVM_GET_SUPPORTED_CPUID and KVM_GET_MSR_INDEX_LIST
		 * are consistent with respect to MSRs whose existence is
		 * enumerated via CPUID.  Skip the check for FS/GS.base MSRs,
		 * as they aren't reported in the save/restore list since their
		 * state is managed via SREGS.
		 */
		TEST_ASSERT(msr->index == MSR_FS_BASE || msr->index == MSR_GS_BASE ||
			    kvm_msr_is_in_save_restore_list(msr->index) ==
			    (kvm_cpu_has(msr->feature) || kvm_cpu_has(msr->feature2)),
			    "%s %s in save/restore list, but %s according to CPUID", msr->name,
			    kvm_msr_is_in_save_restore_list(msr->index) ? "is" : "isn't",
			    (kvm_cpu_has(msr->feature) || kvm_cpu_has(msr->feature2)) ?
			    "supported" : "unsupported");

		sync_global_to_guest(vm, idx);

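		/*
		 * Run each vCPU twice: the guest does one GUEST_SYNC after
		 * writing the MSR and a second at the end of its loop
		 * iteration.
		 */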
		vcpus_run(vcpus, NR_VCPUS);
		vcpus_run(vcpus, NR_VCPUS);
	}

	kvm_vm_free(vm);
}

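/*
 * Run the tests twice: once using the standard MSR ioctls, and again using
 * KVM_{G,S}ET_ONE_REG when KVM_CAP_ONE_REG is supported.
 */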
int main(void)
{
	has_one_reg = kvm_has_cap(KVM_CAP_ONE_REG);

	test_msrs();

	if (has_one_reg) {
		use_one_reg = true;
		test_msrs();
	}
}