// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for exiting into userspace on registered MSRs
 */
#include <sys/ioctl.h>

#include "kvm_test_harness.h"
#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"

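/*
 * Arbitrary MSR index that is implemented neither by KVM nor by hardware.
 * It is used below to verify that accesses to a fabricated MSR can be
 * forwarded to and emulated entirely in userspace.
 */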
#define MSR_NON_EXISTENT 0x474f4f00

static u64 deny_bits = 0;
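/*
 * With deny_bits == 0, every bit in the single-MSR ranges below is clear, so
 * each listed MSR is denied for both reads and writes.  Because the tests
 * enable KVM_MSR_EXIT_REASON_FILTER, the denied accesses exit to userspace
 * instead of being handled by KVM.
 */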
struct kvm_msr_filter filter_allow = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test an MSR the kernel knows about. */
			.base = MSR_IA32_XSS,
			.bitmap = (uint8_t *)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test an MSR the kernel doesn't know about. */
			.base = MSR_IA32_FLUSH_CMD,
			.bitmap = (uint8_t *)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test a fabricated MSR that no one knows about. */
			.base = MSR_NON_EXISTENT,
			.bitmap = (uint8_t *)&deny_bits,
		},
	},
};

struct kvm_msr_filter filter_fs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_FS_BASE,
			.bitmap = (uint8_t *)&deny_bits,
		},
	},
};

struct kvm_msr_filter filter_gs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_GS_BASE,
			.bitmap = (uint8_t *)&deny_bits,
		},
	},
};

static uint64_t msr_non_existent_data;
static int guest_exception_count;
static u32 msr_reads, msr_writes;

static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
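/*
 * Single-byte bitmap with bit 0 set: MSR 0xdeadbeef passes the filter, but
 * its accesses still reach userspace with exit reason
 * KVM_MSR_EXIT_REASON_UNKNOWN because KVM has no handler for that index
 * (see handle_rdmsr()/handle_wrmsr()).
 */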
static u8 bitmap_deadbeef[1] = { 0x1 };

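/*
 * In a KVM_X86_SET_MSR_FILTER bitmap, each bit corresponds to one MSR in the
 * range and a set bit allows the access type(s) named in the range's flags.
 * Clearing the bit therefore denies the MSR and, with
 * KVM_MSR_EXIT_REASON_FILTER enabled, bounces the access out to userspace.
 */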
static void deny_msr(uint8_t *bitmap, u32 msr)
{
	u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);

	bitmap[idx / 8] &= ~(1 << (idx % 8));
}

static void prepare_bitmaps(void)
{
	memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
	memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
	memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
	memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
	memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));

	deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
	deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
	deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}

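/*
 * Default-deny filter: the ranges below cover the 0x0, 0x40000000 (commonly
 * used for synthetic/paravirtual MSRs) and 0xc0000000 MSR index blocks with
 * all-ones bitmaps, so everything in them stays allowed except the few MSRs
 * explicitly denied in prepare_bitmaps().  The extra single-MSR range at
 * 0xdeadbeef allows one bogus index so that its accesses hit KVM's
 * "unknown MSR" path instead of the filter.
 */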
struct kvm_msr_filter filter_deny = {
	.flags = KVM_MSR_FILTER_DEFAULT_DENY,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000_write,
		}, {
			.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
			.base = 0x40000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_40000000,
		}, {
			.flags = KVM_MSR_FILTER_READ,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000_read,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
			.base = 0xdeadbeef,
			.nmsrs = 1,
			.bitmap = bitmap_deadbeef,
		},
	},
};

struct kvm_msr_filter no_filter_deny = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};

/*
 * Note: Force test_rdmsr() to not be inlined to prevent the labels,
 * rdmsr_start and rdmsr_end, from being defined multiple times.
 */
static noinline uint64_t test_rdmsr(uint32_t msr)
{
	uint32_t a, d;

	guest_exception_count = 0;

	__asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
			     "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

/*
 * Note: Force test_wrmsr() to not be inlined to prevent the labels,
 * wrmsr_start and wrmsr_end, from being defined multiple times.
 */
static noinline void test_wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	guest_exception_count = 0;

	__asm__ __volatile__("wrmsr_start: wrmsr; wrmsr_end:" ::
			     "a"(a), "d"(d), "c"(msr) : "memory");
}

extern char rdmsr_start, rdmsr_end;
extern char wrmsr_start, wrmsr_end;
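/*
 * The asm labels above (and the em_* variants below) are visible to C as
 * ordinary symbols; the #GP handlers compare the faulting RIP against the
 * *_start labels and advance RIP to the matching *_end label to skip the
 * instruction.
 */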

/*
 * Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
 * em_rdmsr_start and em_rdmsr_end, from being defined multiple times.
 */
static noinline uint64_t test_em_rdmsr(uint32_t msr)
{
	uint32_t a, d;

	guest_exception_count = 0;

	__asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
			     "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

/*
 * Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
 * em_wrmsr_start and em_wrmsr_end, from being defined multiple times.
 */
static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	guest_exception_count = 0;

	__asm__ __volatile__(KVM_FEP "em_wrmsr_start: wrmsr; em_wrmsr_end:" ::
			     "a"(a), "d"(d), "c"(msr) : "memory");
}

extern char em_rdmsr_start, em_rdmsr_end;
extern char em_wrmsr_start, em_wrmsr_end;

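/*
 * Guest side of the msr_filter_allow test.  Every access below is denied by
 * filter_allow and bounced to userspace, which emulates the MSRs in
 * process_rdmsr()/process_wrmsr(); accesses that userspace rejects come back
 * as a #GP that is counted in guest_exception_count.
 */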
static void guest_code_filter_allow(void)
{
	uint64_t data;

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
	 *
	 * A GP is thrown if anything other than 0 is written to
	 * MSR_IA32_XSS.
	 */
	data = test_rdmsr(MSR_IA32_XSS);
	GUEST_ASSERT(data == 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 1);
	GUEST_ASSERT(guest_exception_count == 1);

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_FLUSH_CMD.
	 *
	 * A GP is thrown if MSR_IA32_FLUSH_CMD is read from or if a value
	 * other than 1 is written to it.
	 */
	test_rdmsr(MSR_IA32_FLUSH_CMD);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 0);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 1);
	GUEST_ASSERT(guest_exception_count == 0);

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_NON_EXISTENT.
	 *
	 * Test that a fabricated MSR can pass through the kernel
	 * and be handled in userspace.
	 */
	test_wrmsr(MSR_NON_EXISTENT, 2);
	GUEST_ASSERT(guest_exception_count == 0);

	data = test_rdmsr(MSR_NON_EXISTENT);
	GUEST_ASSERT(data == 2);
	GUEST_ASSERT(guest_exception_count == 0);

	if (is_forced_emulation_enabled) {
		/* Let userspace know we aren't done. */
		GUEST_SYNC(0);

		/*
		 * Now run the same tests with the instruction emulator.
		 */
		data = test_em_rdmsr(MSR_IA32_XSS);
		GUEST_ASSERT(data == 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 1);
		GUEST_ASSERT(guest_exception_count == 1);

		test_em_rdmsr(MSR_IA32_FLUSH_CMD);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 0);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 1);
		GUEST_ASSERT(guest_exception_count == 0);

		test_em_wrmsr(MSR_NON_EXISTENT, 2);
		GUEST_ASSERT(guest_exception_count == 0);
		data = test_em_rdmsr(MSR_NON_EXISTENT);
		GUEST_ASSERT(data == 2);
		GUEST_ASSERT(guest_exception_count == 0);
	}

	GUEST_DONE();
}

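/*
 * Guest side of the msr_filter_deny test.  While "trapped" (filter_deny
 * active), reads of MSR_SYSCALL_MASK/MSR_GS_BASE and the MSR_IA32_POWER_CTL
 * write are denied by the filter and handled in userspace, which simply
 * echoes the MSR index back as the read value (see handle_rdmsr()).
 */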
static void guest_msr_calls(bool trapped)
{
	/* This goes into the in-kernel emulation */
	wrmsr(MSR_SYSCALL_MASK, 0);

	if (trapped) {
		/* This goes into user space emulation */
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
	} else {
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
	}

	/* If trapped == true, this goes into user space emulation */
	wrmsr(MSR_IA32_POWER_CTL, 0x1234);

	/* This goes into the in-kernel emulation */
	rdmsr(MSR_IA32_POWER_CTL);

	/* Invalid MSR, should always be handled by user space exit */
	GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
	wrmsr(0xdeadbeef, 0x1234);
}

static void guest_code_filter_deny(void)
{
	guest_msr_calls(true);

	/*
	 * Disable msr filtering, so that the kernel
	 * handles everything in the next round
	 */
	GUEST_SYNC(0);

	guest_msr_calls(false);

	GUEST_DONE();
}

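/*
 * Guest side of the msr_permission_bitmap test.  Userspace echoes the MSR
 * index back for filtered reads, so a result equal to MSR_FS_BASE/MSR_GS_BASE
 * proves the access exited to userspace, while a different value means KVM
 * handled it and returned the real segment base.
 */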
static void guest_code_permission_bitmap(void)
{
	uint64_t data;

	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data == MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data != MSR_GS_BASE);

	/* Let userspace know to switch the filter */
	GUEST_SYNC(0);

	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data != MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data == MSR_GS_BASE);

	GUEST_DONE();
}

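/*
 * #GP handler for accesses that userspace fails via run->msr.error: identify
 * the faulting rdmsr/wrmsr by RIP, skip past it (zeroing the rdmsr result),
 * and count the exception so the guest code can assert on it.
 */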
static void __guest_gp_handler(struct ex_regs *regs,
			       char *r_start, char *r_end,
			       char *w_start, char *w_end)
{
	if (regs->rip == (uintptr_t)r_start) {
		regs->rip = (uintptr_t)r_end;
		regs->rax = 0;
		regs->rdx = 0;
	} else if (regs->rip == (uintptr_t)w_start) {
		regs->rip = (uintptr_t)w_end;
	} else {
		GUEST_ASSERT(!"RIP is at an unknown location!");
	}

	++guest_exception_count;
}

static void guest_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &rdmsr_start, &rdmsr_end,
			   &wrmsr_start, &wrmsr_end);
}

static void guest_fep_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &em_rdmsr_start, &em_rdmsr_end,
			   &em_wrmsr_start, &em_wrmsr_end);
}

static void check_for_guest_assert(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (vcpu->run->exit_reason == KVM_EXIT_IO &&
	    get_ucall(vcpu, &uc) == UCALL_ABORT) {
		REPORT_GUEST_ASSERT(uc);
	}
}

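/*
 * Userspace emulation of a filtered RDMSR: return 0 for MSR_IA32_XSS, fail
 * MSR_IA32_FLUSH_CMD reads (KVM injects #GP into the guest), serve the
 * fabricated MSR from msr_non_existent_data, and echo the index back for
 * MSR_FS_BASE/MSR_GS_BASE.
 */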
static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
	struct kvm_run *run = vcpu->run;

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_RDMSR);
	TEST_ASSERT(run->msr.index == msr_index,
		    "Unexpected msr (0x%04x), expected 0x%04x",
		    run->msr.index, msr_index);

	switch (run->msr.index) {
	case MSR_IA32_XSS:
		run->msr.data = 0;
		break;
	case MSR_IA32_FLUSH_CMD:
		run->msr.error = 1;
		break;
	case MSR_NON_EXISTENT:
		run->msr.data = msr_non_existent_data;
		break;
	case MSR_FS_BASE:
		run->msr.data = MSR_FS_BASE;
		break;
	case MSR_GS_BASE:
		run->msr.data = MSR_GS_BASE;
		break;
	default:
		TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
	}
}

static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
	struct kvm_run *run = vcpu->run;

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_WRMSR);
	TEST_ASSERT(run->msr.index == msr_index,
		    "Unexpected msr (0x%04x), expected 0x%04x",
		    run->msr.index, msr_index);

	switch (run->msr.index) {
	case MSR_IA32_XSS:
		if (run->msr.data != 0)
			run->msr.error = 1;
		break;
	case MSR_IA32_FLUSH_CMD:
		if (run->msr.data != 1)
			run->msr.error = 1;
		break;
	case MSR_NON_EXISTENT:
		msr_non_existent_data = run->msr.data;
		break;
	default:
		TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
	}
}

static void process_ucall_done(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
		    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
		    uc.cmd, UCALL_DONE);
}

static uint64_t process_ucall(struct kvm_vcpu *vcpu)
{
	struct ucall uc = {};

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		break;
	case UCALL_ABORT:
		check_for_guest_assert(vcpu);
		break;
	case UCALL_DONE:
		process_ucall_done(vcpu);
		break;
	default:
		TEST_ASSERT(false, "Unexpected ucall");
	}

	return uc.cmd;
}

static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
{
	vcpu_run(vcpu);
	process_rdmsr(vcpu, msr_index);
}

static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
{
	vcpu_run(vcpu);
	process_wrmsr(vcpu, msr_index);
}

static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
{
	vcpu_run(vcpu);
	return process_ucall(vcpu);
}

static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
{
	vcpu_run(vcpu);
	process_ucall_done(vcpu);
}

KVM_ONE_VCPU_TEST_SUITE(user_msr);

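/*
 * Host side of the filter_allow test: each run_guest_then_process_*() call
 * below consumes exactly one userspace exit, in the same order as the guest
 * performs the accesses in guest_code_filter_allow().
 */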
KVM_ONE_VCPU_TEST(user_msr, msr_filter_allow, guest_code_filter_allow)
{
	struct kvm_vm *vm = vcpu->vm;
	uint64_t cmd;
	int rc;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);

	vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);

	/* Process guest code userspace exits. */
	run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);

	run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);

	run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
	run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);

	vcpu_run(vcpu);
	cmd = process_ucall(vcpu);

	if (is_forced_emulation_enabled) {
		TEST_ASSERT_EQ(cmd, UCALL_SYNC);
		vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler);

		/* Process emulated rdmsr and wrmsr instructions. */
		run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);

		run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);

		run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
		run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);

		/* Confirm the guest completed without issues. */
		run_guest_then_process_ucall_done(vcpu);
	} else {
		TEST_ASSERT_EQ(cmd, UCALL_DONE);
		printf("To run the instruction emulation tests, set the module parameter 'kvm.force_emulation_prefix=1'\n");
	}
}

static int handle_ucall(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_SYNC:
		vm_ioctl(vcpu->vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
		break;
	case UCALL_DONE:
		return 1;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
	}

	return 0;
}

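/*
 * Userspace MSR handlers for the filter_deny test: handle_rdmsr() echoes the
 * MSR index back as the read value, both handlers count the exits, and each
 * verifies that the exit reason matches why the access reached userspace
 * (FILTER for denied-but-known MSRs, UNKNOWN for the bogus 0xdeadbeef index).
 */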
static void handle_rdmsr(struct kvm_run *run)
{
	run->msr.data = run->msr.index;
	msr_reads++;

	if (run->msr.index == MSR_SYSCALL_MASK ||
	    run->msr.index == MSR_GS_BASE) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR read trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "MSR deadbeef read trap w/o inval fault");
	}
}

static void handle_wrmsr(struct kvm_run *run)
{
	/* ignore */
	msr_writes++;

	if (run->msr.index == MSR_IA32_POWER_CTL) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for MSR_IA32_POWER_CTL incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR_IA32_POWER_CTL trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for deadbeef incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "deadbeef trap w/o inval fault");
	}
}

KVM_ONE_VCPU_TEST(user_msr, msr_filter_deny, guest_code_filter_deny)
{
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_run *run = vcpu->run;
	int rc;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_INVAL |
						      KVM_MSR_EXIT_REASON_UNKNOWN |
						      KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	prepare_bitmaps();
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);

	while (1) {
		vcpu_run(vcpu);

		switch (run->exit_reason) {
		case KVM_EXIT_X86_RDMSR:
			handle_rdmsr(run);
			break;
		case KVM_EXIT_X86_WRMSR:
			handle_wrmsr(run);
			break;
		case KVM_EXIT_IO:
			if (handle_ucall(vcpu))
				goto done;
			break;
		}
	}

done:
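	/*
	 * Expected totals: the trapped round produces reads of
	 * MSR_SYSCALL_MASK, MSR_GS_BASE and 0xdeadbeef plus writes to
	 * MSR_IA32_POWER_CTL and 0xdeadbeef; after the filter is cleared,
	 * only the unknown 0xdeadbeef accesses still exit, adding one more
	 * read and one more write.
	 */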
	TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
	TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");
}

KVM_ONE_VCPU_TEST(user_msr, msr_permission_bitmap, guest_code_permission_bitmap)
{
	struct kvm_vm *vm = vcpu->vm;
	int rc;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
	run_guest_then_process_rdmsr(vcpu, MSR_FS_BASE);
	TEST_ASSERT(run_guest_then_process_ucall(vcpu) == UCALL_SYNC,
		    "Expected ucall state to be UCALL_SYNC.");
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
	run_guest_then_process_rdmsr(vcpu, MSR_GS_BASE);
	run_guest_then_process_ucall_done(vcpu);
}

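/*
 * Issue the ioctl and expect success if and only if the flag under test is
 * part of the valid mask; any other flag must be rejected with EINVAL.
 */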
#define test_user_exit_msr_ioctl(vm, cmd, arg, flag, valid_mask)	\
({									\
	int r = __vm_ioctl(vm, cmd, arg);				\
									\
	if (flag & valid_mask)						\
		TEST_ASSERT(!r, __KVM_IOCTL_ERROR(#cmd, r));		\
	else								\
		TEST_ASSERT(r == -1 && errno == EINVAL,			\
			    "Wanted EINVAL for %s with flag = 0x%llx, got rc: %i errno: %i (%s)", \
			    #cmd, flag, r, errno, strerror(errno));	\
})

static void run_user_space_msr_flag_test(struct kvm_vm *vm)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_X86_USER_SPACE_MSR };
	int nflags = sizeof(cap.args[0]) * BITS_PER_BYTE;
	int rc;
	int i;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");

	for (i = 0; i < nflags; i++) {
		cap.args[0] = BIT_ULL(i);
		test_user_exit_msr_ioctl(vm, KVM_ENABLE_CAP, &cap,
			   BIT_ULL(i), KVM_MSR_EXIT_REASON_VALID_MASK);
	}
}

static void run_msr_filter_flag_test(struct kvm_vm *vm)
{
	u64 deny_bits = 0;
	struct kvm_msr_filter filter = {
		.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
		.ranges = {
			{
				.flags = KVM_MSR_FILTER_READ,
				.nmsrs = 1,
				.base = 0,
				.bitmap = (uint8_t *)&deny_bits,
			},
		},
	};
	int nflags;
	int rc;
	int i;

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	nflags = sizeof(filter.flags) * BITS_PER_BYTE;
	for (i = 0; i < nflags; i++) {
		filter.flags = BIT_ULL(i);
		test_user_exit_msr_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter,
			   BIT_ULL(i), KVM_MSR_FILTER_VALID_MASK);
	}

	filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;
	nflags = sizeof(filter.ranges[0].flags) * BITS_PER_BYTE;
	for (i = 0; i < nflags; i++) {
		filter.ranges[0].flags = BIT_ULL(i);
		test_user_exit_msr_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter,
			   BIT_ULL(i), KVM_MSR_FILTER_RANGE_VALID_MASK);
	}
}

/* Test that attempts to set the unused bits in a flag fail. */
KVM_ONE_VCPU_TEST(user_msr, user_exit_msr_flags, NULL)
{
	struct kvm_vm *vm = vcpu->vm;

	/* Test flags for KVM_CAP_X86_USER_SPACE_MSR. */
	run_user_space_msr_flag_test(vm);

	/* Test flags and range flags for KVM_X86_SET_MSR_FILTER. */
	run_msr_filter_flag_test(vm);
}

int main(int argc, char *argv[])
{
	return test_harness_run(argc, argv);
}