// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2025, Google LLC.
 *
 * This test verifies that L1 fails to enter L2 with an invalid CR3, and
 * succeeds otherwise.
 */
#include "kvm_util.h"
#include "vmx.h"
#include "svm_util.h"
#include "kselftest.h"

#define L2_GUEST_STACK_SIZE 64

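/*
 * L2 does nothing but announce itself: the hypercall exits back to L1, which
 * then checks for the expected exit reason.
 */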
static void l2_guest_code(void)
{
	vmcall();
}

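/*
 * L1 code for AMD: set up a VMCB for L2, deliberately corrupt the guest CR3
 * and verify VMRUN fails, then restore CR3 and verify L2 runs to completion.
 */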
static void l1_svm_code(struct svm_test_data *svm)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uintptr_t save_cr3;

	generic_svm_setup(svm, l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/* Try to run L2 with an invalid CR3 and make sure VMRUN fails */
	save_cr3 = svm->vmcb->save.cr3;
	svm->vmcb->save.cr3 = -1ull;
	run_guest(svm->vmcb, svm->vmcb_gpa);
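	/*
	 * CR3 with all bits set has reserved bits set, and illegal VMCB state
	 * causes VMRUN to fail with exit code VMEXIT_INVALID (SVM_EXIT_ERR).
	 */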
	GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_ERR);

	/* Now restore CR3 and make sure L2 runs successfully */
	svm->vmcb->save.cr3 = save_cr3;
	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);

	GUEST_DONE();
}

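/*
 * L1 code for Intel: enter VMX operation and load a VMCS for L2, deliberately
 * corrupt the guest CR3 and verify VM-Entry fails, then restore CR3 and
 * verify L2 runs to completion.
 */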
static void l1_vmx_code(struct vmx_pages *vmx_pages)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uintptr_t save_cr3;

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/* Try to run L2 with an invalid CR3 and make sure VM-Entry fails */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
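	/*
	 * VM-Entry with invalid guest state doesn't VMFail; VMLAUNCH itself
	 * succeeds and the CPU immediately VM-Exits with the failed-VM-entry
	 * bit set in the exit reason.
	 */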
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));

	/* Now restore CR3 and make sure L2 runs successfully */
	vmwrite(GUEST_CR3, save_cr3);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_DONE();
}

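/* Dispatch to the VMX or SVM flavor based on what the vCPU supports. */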
static void l1_guest_code(void *data)
{
	if (this_cpu_has(X86_FEATURE_VMX))
		l1_vmx_code(data);
	else
		l1_svm_code(data);
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t guest_gva = 0;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
		     kvm_cpu_has(X86_FEATURE_SVM));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);

	if (kvm_cpu_has(X86_FEATURE_VMX))
		vcpu_alloc_vmx(vm, &guest_gva);
	else
		vcpu_alloc_svm(vm, &guest_gva);

	vcpu_args_set(vcpu, 1, guest_gva);

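	/* Run the vCPU until the guest reports done (or asserts). */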
	for (;;) {
		struct ucall uc;

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
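			/* NOT REACHED */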
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	kvm_vm_free(vm);
	return 0;
}