// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test for x86 KVM_CAP_SYNC_REGS
 *
 * Copyright (C) 2018, Google LLC.
 *
 * Verifies expected behavior of x86 KVM_CAP_SYNC_REGS functionality,
 * including requesting an invalid register set, and updates to/from
 * values in kvm_run.s.regs when kvm_valid_regs and kvm_dirty_regs are
 * toggled.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>

#include "kvm_test_harness.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define UCALL_PIO_PORT ((uint16_t)0x1000)

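/* A do-nothing ucall; the guest passes its address in RDI on every exit. */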
struct ucall uc_none = {
	.cmd = UCALL_NONE,
};

/*
 * The ucall is embedded here to protect against the compiler reshuffling
 * registers before calling a function.  This test only needs to trigger a
 * KVM_EXIT_IO vmexit and preserve RBX; no additional information is needed.
 */
void guest_code(void)
{
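	/*
	 * Loop forever on "in", generating a KVM_EXIT_IO vmexit on each
	 * iteration, and increment RBX so the host can verify whether
	 * register state was synchronized.  The "D" constraint keeps
	 * &uc_none in RDI, as the PIO-based ucall protocol expects.
	 */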
	asm volatile("1: in %[port], %%al\n"
		     "add $0x1, %%rbx\n"
		     "jmp 1b"
		     : : [port] "d" (UCALL_PIO_PORT), "D" (&uc_none)
		     : "rax", "rbx");
}

KVM_ONE_VCPU_TEST_SUITE(sync_regs_test);

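/* Compare all GPRs plus RIP and RFLAGS; assert on any mismatch. */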
static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
{
#define REG_COMPARE(reg) \
	TEST_ASSERT(left->reg == right->reg, \
		    "Register " #reg \
		    " values did not match: 0x%llx, 0x%llx", \
		    left->reg, right->reg)
	REG_COMPARE(rax);
	REG_COMPARE(rbx);
	REG_COMPARE(rcx);
	REG_COMPARE(rdx);
	REG_COMPARE(rsi);
	REG_COMPARE(rdi);
	REG_COMPARE(rsp);
	REG_COMPARE(rbp);
	REG_COMPARE(r8);
	REG_COMPARE(r9);
	REG_COMPARE(r10);
	REG_COMPARE(r11);
	REG_COMPARE(r12);
	REG_COMPARE(r13);
	REG_COMPARE(r14);
	REG_COMPARE(r15);
	REG_COMPARE(rip);
	REG_COMPARE(rflags);
#undef REG_COMPARE
}

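/*
 * Field-by-field comparisons for sregs and vCPU events are not implemented
 * in this revision; the stubs below keep the call sites in place.
 */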
static void compare_sregs(struct kvm_sregs *left, struct kvm_sregs *right)
{
}

static void compare_vcpu_events(struct kvm_vcpu_events *left,
				struct kvm_vcpu_events *right)
{
}

#define TEST_SYNC_FIELDS   (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
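/*
 * Bit 31 is not a defined KVM_SYNC_X86_* field; KVM_RUN is expected to
 * reject it, and KVM_CAP_SYNC_REGS must not advertise it (see main()).
 */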
#define INVALID_SYNC_FIELD 0x80000000

/*
 * Set an exception as pending *and* injected while KVM is processing events.
 * KVM is supposed to ignore/drop pending exceptions if userspace is also
 * requesting that an exception be injected.
 */
static void *race_events_inj_pen(void *arg)
{
	struct kvm_run *run = (struct kvm_run *)arg;
	struct kvm_vcpu_events *events = &run->s.regs.events;

	WRITE_ONCE(events->exception.nr, UD_VECTOR);

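	/*
	 * WRITE_ONCE prevents the compiler from eliding or combining the
	 * stores, which would defeat the race against KVM's reads.
	 */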
	for (;;) {
		WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
		WRITE_ONCE(events->flags, 0);
		WRITE_ONCE(events->exception.injected, 1);
		WRITE_ONCE(events->exception.pending, 1);

		pthread_testcancel();
	}

	return NULL;
}

/*
 * Set an invalid exception vector while KVM is processing events.  KVM is
 * supposed to reject any vector >= 32, as well as NMIs (vector 2).
 */
static void *race_events_exc(void *arg)
{
	struct kvm_run *run = (struct kvm_run *)arg;
	struct kvm_vcpu_events *events = &run->s.regs.events;

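	/*
	 * Queue a legal #UD, then rewrite the vector to an illegal 255 so
	 * that KVM may consume the bogus vector mid-update.
	 */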
	for (;;) {
		WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
		WRITE_ONCE(events->flags, 0);
		WRITE_ONCE(events->exception.nr, UD_VECTOR);
		WRITE_ONCE(events->exception.pending, 1);
		WRITE_ONCE(events->exception.nr, 255);

		pthread_testcancel();
	}

	return NULL;
}

/*
 * Toggle CR4.PAE while KVM is processing SREGS; EFER.LME=1 with CR4.PAE=0 is
 * illegal, and KVM's MMU heavily relies on vCPU state being valid.
 */
static noinline void *race_sregs_cr4(void *arg)
{
	struct kvm_run *run = (struct kvm_run *)arg;
	__u64 *cr4 = &run->s.regs.sregs.cr4;
	__u64 pae_enabled = *cr4;
	__u64 pae_disabled = *cr4 & ~X86_CR4_PAE;

	for (;;) {
		WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_SREGS);
		WRITE_ONCE(*cr4, pae_enabled);
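		/*
		 * The nop slide delays the disable, giving KVM a window in
		 * which to observe the legal PAE=1 value before it's yanked
		 * away (a best-effort widening of the race, not a guarantee).
		 */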
		asm volatile(".rept 512\n\t"
			     "nop\n\t"
			     ".endr");
		WRITE_ONCE(*cr4, pae_disabled);

		pthread_testcancel();
	}

	return NULL;
}

static void race_sync_regs(struct kvm_vcpu *vcpu, void *racer)
{
	const time_t TIMEOUT = 2; /* seconds, roughly */
	struct kvm_x86_state *state;
	struct kvm_translation tr;
	struct kvm_run *run;
	pthread_t thread;
	time_t t;

	run = vcpu->run;

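	/* Run once with SREGS synced so run->s.regs holds live guest state. */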
	run->kvm_valid_regs = KVM_SYNC_X86_SREGS;
	vcpu_run(vcpu);
	run->kvm_valid_regs = 0;

	/* Save state *before* spawning the thread that mucks with vCPU state. */
	state = vcpu_save_state(vcpu);

	/*
	 * Selftests run 64-bit guests by default, so both EFER.LME and
	 * CR4.PAE should already be set in guest state.
	 */
	TEST_ASSERT((run->s.regs.sregs.cr4 & X86_CR4_PAE) &&
		    (run->s.regs.sregs.efer & EFER_LME),
		    "vCPU should be in long mode, CR4.PAE=%d, EFER.LME=%d",
		    !!(run->s.regs.sregs.cr4 & X86_CR4_PAE),
		    !!(run->s.regs.sregs.efer & EFER_LME));

	TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);

	for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
		/*
		 * Reload known good state if the vCPU triple faults, e.g. due
		 * to the unhandled #GPs being injected.  VMX preserves state
		 * on shutdown, but SVM synthesizes an INIT as the VMCB state
		 * is architecturally undefined on triple fault.
		 */
		if (!__vcpu_run(vcpu) && run->exit_reason == KVM_EXIT_SHUTDOWN)
			vcpu_load_state(vcpu, state);

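		/*
		 * For the CR4 race, also exercise KVM's MMU via
		 * KVM_TRANSLATE, which walks guest page tables while the
		 * racer thread is toggling CR4.PAE.
		 */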
		if (racer == race_sregs_cr4) {
			tr = (struct kvm_translation) { .linear_address = 0 };
			__vcpu_ioctl(vcpu, KVM_TRANSLATE, &tr);
		}
	}

	TEST_ASSERT_EQ(pthread_cancel(thread), 0);
	TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);

	kvm_x86_state_cleanup(state);
}

KVM_ONE_VCPU_TEST(sync_regs_test, read_invalid, guest_code)
{
	struct kvm_run *run = vcpu->run;
	int rv;

	/* Request reading invalid register set from VCPU. */
	run->kvm_valid_regs = INVALID_SYNC_FIELD;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
		    rv);
	run->kvm_valid_regs = 0;

	run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
		    rv);
	run->kvm_valid_regs = 0;
}

KVM_ONE_VCPU_TEST(sync_regs_test, set_invalid, guest_code)
{
	struct kvm_run *run = vcpu->run;
	int rv;

	/* Request setting invalid register set into VCPU. */
	run->kvm_dirty_regs = INVALID_SYNC_FIELD;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
		    rv);
	run->kvm_dirty_regs = 0;

	run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
		    rv);
	run->kvm_dirty_regs = 0;
}

KVM_ONE_VCPU_TEST(sync_regs_test, req_and_verify_all_valid, guest_code)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_vcpu_events events;
	struct kvm_sregs sregs;
	struct kvm_regs regs;

	/* Request and verify all valid register sets. */
	/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	vcpu_regs_get(vcpu, &regs);
	compare_regs(&regs, &run->s.regs.regs);

	vcpu_sregs_get(vcpu, &sregs);
	compare_sregs(&sregs, &run->s.regs.sregs);

	vcpu_events_get(vcpu, &events);
	compare_vcpu_events(&events, &run->s.regs.events);
}

KVM_ONE_VCPU_TEST(sync_regs_test, set_and_verify_various, guest_code)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_vcpu_events events;
	struct kvm_sregs sregs;
	struct kvm_regs regs;

	/* Run once to get register set */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	/* Set and verify various register values. */
	run->s.regs.regs.rbx = 0xBAD1DEA;
	run->s.regs.sregs.apic_base = 1 << 11;
	/* TODO run->s.regs.events.XYZ = ABC; */

	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	TEST_ASSERT(run->s.regs.sregs.apic_base == 1 << 11,
		    "apic_base sync regs value incorrect 0x%llx.",
		    run->s.regs.sregs.apic_base);

	vcpu_regs_get(vcpu, &regs);
	compare_regs(&regs, &run->s.regs.regs);

	vcpu_sregs_get(vcpu, &sregs);
	compare_sregs(&sregs, &run->s.regs.sregs);

	vcpu_events_get(vcpu, &events);
	compare_vcpu_events(&events, &run->s.regs.events);
}

KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_dirty_regs_bits, guest_code)
{
	struct kvm_run *run = vcpu->run;

	/* Clear kvm_dirty_regs bits, verify new s.regs values are
	 * overwritten with existing guest values.
	 */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	run->kvm_dirty_regs = 0;
	run->s.regs.regs.rbx = 0xDEADBEEF;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
}

KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_valid_and_dirty_regs, guest_code)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_regs regs;

	/* Run once to get register set */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	/* Clear kvm_valid_regs bits and kvm_dirty_regs bits.
	 * Verify s.regs values are not overwritten with existing guest values
	 * and that guest values are not overwritten with kvm_sync_regs values.
	 */
	run->kvm_valid_regs = 0;
	run->kvm_dirty_regs = 0;
	run->s.regs.regs.rbx = 0xAAAA;
	vcpu_regs_get(vcpu, &regs);
	regs.rbx = 0xBAC0;
	vcpu_regs_set(vcpu, &regs);
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	vcpu_regs_get(vcpu, &regs);
	TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
		    "rbx guest value incorrect 0x%llx.",
		    regs.rbx);
}

KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_valid_regs_bits, guest_code)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_regs regs;

	/* Run once to get register set */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	/* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten
	 * with existing guest values but that guest values are overwritten
	 * with kvm_sync_regs values.
	 */
	run->kvm_valid_regs = 0;
	run->kvm_dirty_regs = TEST_SYNC_FIELDS;
	run->s.regs.regs.rbx = 0xBBBB;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	vcpu_regs_get(vcpu, &regs);
	TEST_ASSERT(regs.rbx == 0xBBBB + 1,
		    "rbx guest value incorrect 0x%llx.",
		    regs.rbx);
}

KVM_ONE_VCPU_TEST(sync_regs_test, race_cr4, guest_code)
{
	race_sync_regs(vcpu, race_sregs_cr4);
}

KVM_ONE_VCPU_TEST(sync_regs_test, race_exc, guest_code)
{
	race_sync_regs(vcpu, race_events_exc);
}

KVM_ONE_VCPU_TEST(sync_regs_test, race_inj_pen, guest_code)
{
	race_sync_regs(vcpu, race_events_inj_pen);
}

int main(int argc, char *argv[])
{
	int cap;

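	/*
	 * Require that KVM supports syncing all tested register sets and
	 * does not claim support for the intentionally bogus field.
	 */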
	cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
	TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
	TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));

	return test_harness_run(argc, argv);
}