xref: /linux/tools/testing/selftests/kvm/x86/msrs_test.c (revision a8b9cca99cf454799ef799f8af9bdb55389cfd94)
// SPDX-License-Identifier: GPL-2.0-only
#include <asm/msr-index.h>

#include <stdint.h>

#include "kvm_util.h"
#include "processor.h"

/*
 * Use HYPERVISOR for MSRs that are emulated unconditionally, as the
 * HYPERVISOR feature itself is always set for KVM guests.
 */
#define X86_FEATURE_NONE X86_FEATURE_HYPERVISOR

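/*
 * Describes a single MSR to test: the feature(s) that enumerate the MSR, the
 * value to write, a value that is expected to fault, and the MSR's expected
 * value at vCPU RESET.
 */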
struct kvm_msr {
	const struct kvm_x86_cpu_feature feature;
	const struct kvm_x86_cpu_feature feature2;
	const char *name;
	const u64 reset_val;
	const u64 write_val;
	const u64 rsvd_val;
	const u32 index;
};

#define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2)		\
{									\
	.index = msr,							\
	.name = str,							\
	.write_val = val,						\
	.rsvd_val = rsvd,						\
	.reset_val = reset,						\
	.feature = X86_FEATURE_ ##feat,					\
	.feature2 = X86_FEATURE_ ##f2,					\
}

#define __MSR_TEST(msr, str, val, rsvd, reset, feat)			\
	____MSR_TEST(msr, str, val, rsvd, reset, feat, feat)

#define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat)			\
	__MSR_TEST(msr, #msr, val, rsvd, reset, feat)

#define MSR_TEST(msr, val, rsvd, feat)					\
	__MSR_TEST(msr, #msr, val, rsvd, 0, feat)

#define MSR_TEST2(msr, val, rsvd, feat, f2)				\
	____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2)

/*
 * Note, use a page-aligned value for the canonical value so that it is
 * compatible with MSRs that use bits 11:0 for things other than addresses.
 */
static const u64 canonical_val = 0x123456789000ull;

/*
 * Arbitrary value with bits set in every byte, but not all bits set.  This is
 * also a non-canonical value, but that's coincidental (any 64-bit value with
 * an alternating 0s/1s pattern will be non-canonical).
 */
static const u64 u64_val = 0xaaaa5555aaaa5555ull;

#define MSR_TEST_CANONICAL(msr, feat)					\
	__MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)

/*
 * The main array of MSRs to test must be scoped to a function due to the use
 * of structures to define features.  For the global array, allocate enough
 * space for the foreseeable future without getting too ridiculous, to
 * minimize maintenance costs (bumping the array size every time an MSR is
 * added is really annoying).
 */
static struct kvm_msr msrs[128];
static int idx;

static bool ignore_unsupported_msrs;

static u64 fixup_rdmsr_val(u32 msr, u64 want)
{
	/*
	 * AMD CPUs drop bits 63:32 on some MSRs that Intel CPUs support.  KVM
	 * is supposed to emulate that behavior based on guest vendor model
	 * (which is the same as the host vendor model for this test).
	 */
	if (!host_cpu_is_amd)
		return want;

	switch (msr) {
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
	case MSR_TSC_AUX:
		return want & GENMASK_ULL(31, 0);
	default:
		return want;
	}
}

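/* Read @msr in the guest; assert the read doesn't fault and yields @want. */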
static void __rdmsr(u32 msr, u64 want)
{
	u64 val;
	u8 vec;

	vec = rdmsr_safe(msr, &val);
	__GUEST_ASSERT(!vec, "Unexpected %s on RDMSR(0x%x)", ex_str(vec), msr);

	__GUEST_ASSERT(val == want, "Wanted 0x%lx from RDMSR(0x%x), got 0x%lx",
		       want, msr, val);
}

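/*
 * Write @val to @msr in the guest, then verify that the value (truncated by
 * the CPU where applicable) reads back.
 */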
static void __wrmsr(u32 msr, u64 val)
{
	u8 vec;

	vec = wrmsr_safe(msr, val);
	__GUEST_ASSERT(!vec, "Unexpected %s on WRMSR(0x%x, 0x%lx)",
		       ex_str(vec), msr, val);
	__rdmsr(msr, fixup_rdmsr_val(msr, val));
}

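/*
 * For a supported MSR, verify the reset value, write the test value, sync the
 * observed value to the host, and then verify the host restored the reset
 * value.
 */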
static void guest_test_supported_msr(const struct kvm_msr *msr)
{
	__rdmsr(msr->index, msr->reset_val);
	__wrmsr(msr->index, msr->write_val);
	GUEST_SYNC(fixup_rdmsr_val(msr->index, msr->write_val));

	__rdmsr(msr->index, msr->reset_val);
}

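/*
 * For an MSR whose primary feature isn't advertised to the vCPU, verify that
 * RDMSR and WRMSR #GP (modulo the exceptions below).
 */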
static void guest_test_unsupported_msr(const struct kvm_msr *msr)
{
	u64 val;
	u8 vec;

	/*
	 * KVM's ABI with respect to ignore_msrs is a mess and largely beyond
	 * repair, just skip the unsupported MSR tests.
	 */
	if (ignore_unsupported_msrs)
		goto skip_wrmsr_gp;

	/*
	 * {S,U}_CET exist if IBT or SHSTK is supported, but with bits that are
	 * writable only if their associated feature is supported.  Skip the
	 * RDMSR #GP test if the secondary feature is supported, but perform
	 * the WRMSR #GP test as the to-be-written value is tied to the primary
	 * feature.  For all other MSRs, simply do nothing.
	 */
	if (this_cpu_has(msr->feature2)) {
		if (msr->index != MSR_IA32_U_CET &&
		    msr->index != MSR_IA32_S_CET)
			goto skip_wrmsr_gp;

		goto skip_rdmsr_gp;
	}

	vec = rdmsr_safe(msr->index, &val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on RDMSR(0x%x), got %s",
		       msr->index, ex_str(vec));

skip_rdmsr_gp:
	vec = wrmsr_safe(msr->index, msr->write_val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
		       msr->index, msr->write_val, ex_str(vec));

skip_wrmsr_gp:
	GUEST_SYNC(0);
}

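/*
 * Verify that writing the MSR's reserved/illegal test value #GPs, or, when
 * the CPU legally truncates the value (see fixup_rdmsr_val()), succeeds with
 * the truncated value.
 */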
void guest_test_reserved_val(const struct kvm_msr *msr)
{
	/* Skip reserved value checks as well, ignore_msrs is truly a mess. */
	if (ignore_unsupported_msrs)
		return;

	/*
	 * If the CPU will truncate the written value (e.g. SYSENTER on AMD),
	 * expect success and a truncated value, not #GP.
	 */
	if (!this_cpu_has(msr->feature) ||
	    msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
		u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);

		__GUEST_ASSERT(vec == GP_VECTOR,
			       "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
			       msr->index, msr->rsvd_val, ex_str(vec));
	} else {
		__wrmsr(msr->index, msr->rsvd_val);
		__wrmsr(msr->index, msr->reset_val);
	}
}

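/*
 * Guest entry point.  Loop forever; the host selects the MSR under test via
 * 'idx' and terminates the test from the host side.
 */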
static void guest_main(void)
{
	for (;;) {
		const struct kvm_msr *msr = &msrs[READ_ONCE(idx)];

		if (this_cpu_has(msr->feature))
			guest_test_supported_msr(msr);
		else
			guest_test_unsupported_msr(msr);

		if (msr->rsvd_val)
			guest_test_reserved_val(msr);

		GUEST_SYNC(msr->reset_val);
	}
}

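/*
 * From the host, verify the vCPU's MSR holds @guest_val, then restore the
 * MSR's reset value and verify that it sticks.
 */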
static void host_test_msr(struct kvm_vcpu *vcpu, u64 guest_val)
{
	u64 reset_val = msrs[idx].reset_val;
	u32 msr = msrs[idx].index;
	u64 val;

	if (!kvm_cpu_has(msrs[idx].feature))
		return;

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == guest_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    guest_val, msr, val);

	vcpu_set_msr(vcpu, msr, reset_val);

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    reset_val, msr, val);
}

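/*
 * Run the vCPU until the next GUEST_SYNC, and hand the synced value to
 * host_test_msr().
 */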
static void do_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			host_test_msr(vcpu, uc.args[1]);
			return;
		case UCALL_PRINTF:
			pr_info("%s", uc.buffer);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		case UCALL_DONE:
			TEST_FAIL("Unexpected UCALL_DONE");
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++)
		do_vcpu_run(vcpus[i]);
}

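/* Expected value of MISC_ENABLE at vCPU RESET: PEBS and BTS are unavailable. */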
#define MISC_ENABLES_RESET_VAL (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

static void test_msrs(void)
{
	const struct kvm_msr __msrs[] = {
		MSR_TEST_NON_ZERO(MSR_IA32_MISC_ENABLE,
				  MISC_ENABLES_RESET_VAL | MSR_IA32_MISC_ENABLE_FAST_STRING,
				  MSR_IA32_MISC_ENABLE_FAST_STRING, MISC_ENABLES_RESET_VAL, NONE),
		MSR_TEST_NON_ZERO(MSR_IA32_CR_PAT, 0x07070707, 0, 0x7040600070406, NONE),

		/*
		 * TSC_AUX is supported if RDTSCP *or* RDPID is supported.  Add
		 * an entry for each feature so that TSC_AUX doesn't exist for
		 * the "unsupported" vCPU, and obviously to test both cases.
		 */
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDTSCP, RDPID),
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDPID, RDTSCP),

		MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE),
		/*
		 * Non-canonical values for SYSENTER_{ESP,EIP} technically #GP
		 * on Intel, but KVM doesn't emulate that behavior on emulated
		 * writes, i.e. this test would observe different behavior if
		 * the MSR writes were handled by hardware vs. KVM.  KVM's
		 * behavior is intended (though far from ideal), so don't
		 * bother testing non-canonical values.
		 */
		MSR_TEST(MSR_IA32_SYSENTER_ESP, canonical_val, 0, NONE),
		MSR_TEST(MSR_IA32_SYSENTER_EIP, canonical_val, 0, NONE),

		MSR_TEST_CANONICAL(MSR_FS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_KERNEL_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_LSTAR, LM),
		MSR_TEST_CANONICAL(MSR_CSTAR, LM),
		MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, 0, LM),

		MSR_TEST2(MSR_IA32_S_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_S_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST2(MSR_IA32_U_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
		MSR_TEST2(MSR_IA32_U_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL0_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL0_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL1_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL1_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL2_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL2_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL3_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL3_SSP, canonical_val, canonical_val | 1, SHSTK),
	};

	const struct kvm_x86_cpu_feature feat_none = X86_FEATURE_NONE;
	const struct kvm_x86_cpu_feature feat_lm = X86_FEATURE_LM;

	/*
	 * Create three vCPUs, but run them on the same task, to validate KVM's
	 * context switching of MSR state.  Don't pin the task to a pCPU to
	 * also validate KVM's handling of cross-pCPU migration.  Use the full
	 * set of features for the first two vCPUs, but clear all features in
	 * the third vCPU in order to test both positive and negative paths.
	 */
	const int NR_VCPUS = 3;
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;

	kvm_static_assert(sizeof(__msrs) <= sizeof(msrs));
	kvm_static_assert(ARRAY_SIZE(__msrs) <= ARRAY_SIZE(msrs));
	memcpy(msrs, __msrs, sizeof(__msrs));

	ignore_unsupported_msrs = kvm_is_ignore_msrs();

	vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);

	sync_global_to_guest(vm, msrs);
	sync_global_to_guest(vm, ignore_unsupported_msrs);

	/*
	 * Clear features in the "unsupported features" vCPU.  This needs to be
	 * done before the first vCPU run as KVM's ABI is that guest CPUID is
	 * immutable once the vCPU has been run.
	 */
	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		/*
		 * Don't clear LM; selftests are 64-bit only, and KVM doesn't
		 * honor LM=0 for MSRs that are supposed to exist if and only
		 * if the vCPU is a 64-bit model.  Ditto for NONE; clearing a
		 * fake feature flag will result in false failures.
		 */
		if (memcmp(&msrs[idx].feature, &feat_lm, sizeof(feat_lm)) &&
		    memcmp(&msrs[idx].feature, &feat_none, sizeof(feat_none)))
			vcpu_clear_cpuid_feature(vcpus[2], msrs[idx].feature);
	}

	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		sync_global_to_guest(vm, idx);

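		/*
		 * Run all vCPUs twice per MSR: once to verify the value
		 * written by the guest and restore the reset value from the
		 * host, and once to verify the guest observes the reset value
		 * (and that reserved values are handled correctly).
		 */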
		vcpus_run(vcpus, NR_VCPUS);
		vcpus_run(vcpus, NR_VCPUS);
	}

	kvm_vm_free(vm);
}

int main(void)
{
	test_msrs();
}