// SPDX-License-Identifier: GPL-2.0-only
#include <asm/msr-index.h>

#include <stdint.h>

#include "kvm_util.h"
#include "processor.h"

/*
 * Use X86_FEATURE_HYPERVISOR for MSRs that are emulated unconditionally, as
 * the HYPERVISOR feature flag itself is set unconditionally for KVM guests.
 */
#define X86_FEATURE_NONE X86_FEATURE_HYPERVISOR

struct kvm_msr {
	const struct kvm_x86_cpu_feature feature;	/* Primary feature that enumerates the MSR. */
	const struct kvm_x86_cpu_feature feature2;	/* Secondary feature that also enumerates the MSR. */
	const char *name;
	const u64 reset_val;
	const u64 write_val;	/* Value that is expected to be accepted. */
	const u64 rsvd_val;	/* Value that is expected to #GP, '0' == skip. */
	const u32 index;
};

#define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2)		\
{									\
	.index = msr,							\
	.name = str,							\
	.write_val = val,						\
	.rsvd_val = rsvd,						\
	.reset_val = reset,						\
	.feature = X86_FEATURE_ ##feat,					\
	.feature2 = X86_FEATURE_ ##f2,					\
}

#define __MSR_TEST(msr, str, val, rsvd, reset, feat)			\
	____MSR_TEST(msr, str, val, rsvd, reset, feat, feat)

#define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat)			\
	__MSR_TEST(msr, #msr, val, rsvd, reset, feat)

#define MSR_TEST(msr, val, rsvd, feat)					\
	__MSR_TEST(msr, #msr, val, rsvd, 0, feat)

#define MSR_TEST2(msr, val, rsvd, feat, f2)				\
	____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2)
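
/*
 * E.g. MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE) defines an entry with
 * .index = MSR_IA32_SYSENTER_CS, .name = "MSR_IA32_SYSENTER_CS", a write
 * value of 0x1234, no reserved value to test, a reset value of '0', and both
 * feature fields set to X86_FEATURE_NONE.
 */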

/*
 * Note, use a page-aligned value for the canonical value so that the value
 * is compatible with MSRs that use bits 11:0 for things other than addresses.
 */
static const u64 canonical_val = 0x123456789000ull;

/*
 * Arbitrary value with bits set in every byte, but not all bits set.  This is
 * also a non-canonical value, but that's coincidental (any 64-bit value with
 * an alternating 0s/1s pattern will be non-canonical).
 */
static const u64 u64_val = 0xaaaa5555aaaa5555ull;

#define MSR_TEST_CANONICAL(msr, feat)					\
	__MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)

/*
 * The source array must be scoped to a function as features are defined via
 * non-constant structures, which can't be used to initialize globals.  For
 * the global array, allocate enough space for the foreseeable future without
 * getting too ridiculous, to minimize maintenance costs (bumping the array
 * size every time an MSR is added is really annoying).
 */
static struct kvm_msr msrs[128];
static int idx;

static bool ignore_unsupported_msrs;

static u64 fixup_rdmsr_val(u32 msr, u64 want)
{
	/*
	 * AMD CPUs drop bits 63:32 on some MSRs that Intel CPUs support.  KVM
	 * is supposed to emulate that behavior based on guest vendor model
	 * (which is the same as the host vendor model for this test).
	 */
	if (!host_cpu_is_amd)
		return want;

	switch (msr) {
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
	case MSR_TSC_AUX:
		return want & GENMASK_ULL(31, 0);
	default:
		return want;
	}
}

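/* Read @msr and assert that the read succeeds and returns @want. */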
static void __rdmsr(u32 msr, u64 want)
{
	u64 val;
	u8 vec;

	vec = rdmsr_safe(msr, &val);
	__GUEST_ASSERT(!vec, "Unexpected %s on RDMSR(0x%x)", ex_str(vec), msr);

	__GUEST_ASSERT(val == want, "Wanted 0x%lx from RDMSR(0x%x), got 0x%lx",
		       want, msr, val);
}

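/*
 * Write @val to @msr, assert success, and verify @msr reads back @val,
 * adjusted for CPUs (AMD) that drop bits 63:32 on select MSRs.
 */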
static void __wrmsr(u32 msr, u64 val)
{
	u8 vec;

	vec = wrmsr_safe(msr, val);
	__GUEST_ASSERT(!vec, "Unexpected %s on WRMSR(0x%x, 0x%lx)",
		       ex_str(vec), msr, val);
	__rdmsr(msr, fixup_rdmsr_val(msr, val));
}

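/*
 * Test a supported MSR: verify the reset value, write the test value, and
 * sync the expected readback value to the host so that the host can verify
 * the value via KVM's ioctls and then restore the reset value, which the
 * guest verifies upon re-entry.
 */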
static void guest_test_supported_msr(const struct kvm_msr *msr)
{
	__rdmsr(msr->index, msr->reset_val);
	__wrmsr(msr->index, msr->write_val);
	GUEST_SYNC(fixup_rdmsr_val(msr->index, msr->write_val));

	__rdmsr(msr->index, msr->reset_val);
}

static void guest_test_unsupported_msr(const struct kvm_msr *msr)
{
	u64 val;
	u8 vec;

	/*
	 * KVM's ABI with respect to ignore_msrs is a mess and largely beyond
	 * repair; just skip the unsupported MSR tests.
	 */
	if (ignore_unsupported_msrs)
		goto skip_wrmsr_gp;

	/*
	 * {S,U}_CET exist if IBT or SHSTK is supported, but with bits that are
	 * writable only if their associated feature is supported.  Skip the
	 * RDMSR #GP test if the secondary feature is supported, but perform
	 * the WRMSR #GP test as the to-be-written value is tied to the primary
	 * feature.  For all other MSRs, skip both tests.
	 */
	if (this_cpu_has(msr->feature2)) {
		if (msr->index != MSR_IA32_U_CET &&
		    msr->index != MSR_IA32_S_CET)
			goto skip_wrmsr_gp;

		goto skip_rdmsr_gp;
	}

	vec = rdmsr_safe(msr->index, &val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on RDMSR(0x%x), got %s",
		       msr->index, ex_str(vec));

skip_rdmsr_gp:
	vec = wrmsr_safe(msr->index, msr->write_val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
		       msr->index, msr->write_val, ex_str(vec));

skip_wrmsr_gp:
	GUEST_SYNC(0);
}

static void guest_test_reserved_val(const struct kvm_msr *msr)
{
	/* Skip the reserved value checks as well; ignore_msrs truly is a mess. */
	if (ignore_unsupported_msrs)
		return;

	/*
	 * If the CPU will truncate the written value (e.g. SYSENTER on AMD),
	 * expect success and a truncated value, not #GP.
	 */
	if (!this_cpu_has(msr->feature) ||
	    msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
		u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);

		__GUEST_ASSERT(vec == GP_VECTOR,
			       "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
			       msr->index, msr->rsvd_val, ex_str(vec));
	} else {
		__wrmsr(msr->index, msr->rsvd_val);
		__wrmsr(msr->index, msr->reset_val);
	}
}

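/*
 * Guest entry point.  Loop forever so that the host can reuse the same vCPUs
 * for every testcase; the host selects the MSR under test by updating the
 * global 'idx' between runs.
 */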
static void guest_main(void)
{
	for (;;) {
		const struct kvm_msr *msr = &msrs[READ_ONCE(idx)];

		if (this_cpu_has(msr->feature))
			guest_test_supported_msr(msr);
		else
			guest_test_unsupported_msr(msr);

		if (msr->rsvd_val)
			guest_test_reserved_val(msr);

		GUEST_SYNC(msr->reset_val);
	}
}

static bool has_one_reg;	/* KVM_CAP_ONE_REG is supported. */
static bool use_one_reg;	/* Set MSRs via KVM_SET_ONE_REG on this pass. */

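/*
 * Verify the guest-visible value from the host, restore the reset value via
 * KVM_SET_MSRS or KVM_SET_ONE_REG, and cross-check the restored value via
 * KVM_GET_ONE_REG when it's supported.
 */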
static void host_test_msr(struct kvm_vcpu *vcpu, u64 guest_val)
{
	u64 reset_val = msrs[idx].reset_val;
	u32 msr = msrs[idx].index;
	u64 val;

	if (!kvm_cpu_has(msrs[idx].feature))
		return;

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == guest_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    guest_val, msr, val);

	if (use_one_reg)
		vcpu_set_reg(vcpu, KVM_X86_REG_MSR(msr), reset_val);
	else
		vcpu_set_msr(vcpu, msr, reset_val);

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    reset_val, msr, val);

	if (!has_one_reg)
		return;

	val = vcpu_get_reg(vcpu, KVM_X86_REG_MSR(msr));
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
		    reset_val, msr, val);
}

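/* Run the vCPU until the guest signals completion of one test stage. */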
static void do_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			host_test_msr(vcpu, uc.args[1]);
			return;
		case UCALL_PRINTF:
			pr_info("%s", uc.buffer);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		case UCALL_DONE:
			TEST_FAIL("Unexpected UCALL_DONE");
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++)
		do_vcpu_run(vcpus[i]);
}

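/* KVM's reset value for MISC_ENABLES sets the PEBS and BTS "unavailable" bits. */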
#define MISC_ENABLES_RESET_VAL (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

static void test_msrs(void)
{
	const struct kvm_msr __msrs[] = {
		MSR_TEST_NON_ZERO(MSR_IA32_MISC_ENABLE,
				  MISC_ENABLES_RESET_VAL | MSR_IA32_MISC_ENABLE_FAST_STRING,
				  MSR_IA32_MISC_ENABLE_FAST_STRING, MISC_ENABLES_RESET_VAL, NONE),
		MSR_TEST_NON_ZERO(MSR_IA32_CR_PAT, 0x07070707, 0, 0x7040600070406, NONE),

		/*
		 * TSC_AUX is supported if RDTSCP *or* RDPID is supported.  Add
		 * an entry for each feature so that TSC_AUX doesn't exist for
		 * the "unsupported" vCPU, and obviously to test both cases.
		 */
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDTSCP, RDPID),
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDPID, RDTSCP),

		MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE),
		/*
		 * SYSENTER_{ESP,EIP} are subject to canonical checks on
		 * Intel, but KVM doesn't emulate that behavior on emulated
		 * writes, i.e. this test will observe different behavior if
		 * the MSR writes are handled by hardware vs. KVM.  KVM's
		 * behavior is intended (though far from ideal), so don't
		 * bother testing non-canonical values.
		 */
		MSR_TEST(MSR_IA32_SYSENTER_ESP, canonical_val, 0, NONE),
		MSR_TEST(MSR_IA32_SYSENTER_EIP, canonical_val, 0, NONE),

		MSR_TEST_CANONICAL(MSR_FS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_KERNEL_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_LSTAR, LM),
		MSR_TEST_CANONICAL(MSR_CSTAR, LM),
		MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, 0, LM),

		MSR_TEST2(MSR_IA32_S_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_S_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST2(MSR_IA32_U_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_U_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL0_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL0_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL1_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL1_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL2_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL2_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL3_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL3_SSP, canonical_val, canonical_val | 1, SHSTK),
	};

	const struct kvm_x86_cpu_feature feat_none = X86_FEATURE_NONE;
	const struct kvm_x86_cpu_feature feat_lm = X86_FEATURE_LM;

	/*
	 * Create three vCPUs, but run them on the same task, to validate KVM's
	 * context switching of MSR state.  Don't pin the task to a pCPU to
	 * also validate KVM's handling of cross-pCPU migration.  Use the full
	 * set of features for the first two vCPUs, but clear all features in
	 * the third vCPU in order to test both positive and negative paths.
	 */
	const int NR_VCPUS = 3;
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;

	kvm_static_assert(sizeof(__msrs) <= sizeof(msrs));
	kvm_static_assert(ARRAY_SIZE(__msrs) <= ARRAY_SIZE(msrs));
	memcpy(msrs, __msrs, sizeof(__msrs));

	ignore_unsupported_msrs = kvm_is_ignore_msrs();

	vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);

	sync_global_to_guest(vm, msrs);
	sync_global_to_guest(vm, ignore_unsupported_msrs);

	/*
	 * Clear features in the "unsupported features" vCPU.  This needs to be
	 * done before the first vCPU run, as KVM's ABI is that guest CPUID is
	 * immutable once the vCPU has been run.
	 */
	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		/*
		 * Don't clear LM; selftests are 64-bit only, and KVM doesn't
		 * honor LM=0 for MSRs that are supposed to exist if and only
		 * if the vCPU is a 64-bit model.  Ditto for NONE; clearing a
		 * fake feature flag will result in false failures.
		 */
		if (memcmp(&msrs[idx].feature, &feat_lm, sizeof(feat_lm)) &&
		    memcmp(&msrs[idx].feature, &feat_none, sizeof(feat_none)))
			vcpu_clear_cpuid_feature(vcpus[2], msrs[idx].feature);
	}

	/*
	 * Run all vCPUs twice per MSR, as guest_main does two GUEST_SYNCs per
	 * testcase: one in the guest_test_{un}supported_msr() flow, and one
	 * at the end of the loop.
	 */
	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		sync_global_to_guest(vm, idx);

		vcpus_run(vcpus, NR_VCPUS);
		vcpus_run(vcpus, NR_VCPUS);
	}

	kvm_vm_free(vm);
}

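/*
 * Run all testcases twice if KVM_CAP_ONE_REG is supported: once using the
 * "legacy" MSR ioctls to set MSRs, and once using KVM_SET_ONE_REG.
 */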
int main(void)
{
	has_one_reg = kvm_has_cap(KVM_CAP_ONE_REG);

	test_msrs();

	if (has_one_reg) {
		use_one_reg = true;
		test_msrs();
	}
}