xref: /linux/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c (revision 01f492e1817e858d1712f2489d0afbaa552f417b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2026, Google LLC.
4  */
5 #include "kvm_util.h"
6 #include "vmx.h"
7 #include "svm_util.h"
8 #include "kselftest.h"
9 #include "kvm_test_harness.h"
10 #include "test_util.h"
11 
12 
/* Size (in unsigned longs) of the stack handed to the L2 guest. */
#define L2_GUEST_STACK_SIZE 64

/* Ucall "sync" tokens the guest uses to report progress to the host. */
#define SYNC_GP 101
#define SYNC_L2_STARTED 102

/* Stack for L2; its top is passed to generic_svm_setup() by each L1 body. */
static unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
19 
/*
 * L1's #GP handler: report the fault to the host via ucall so the test can
 * verify that an invalid vmcb12 GPA raised #GP in the guest.
 */
static void guest_gp_handler(struct ex_regs *regs)
{
	GUEST_SYNC(SYNC_GP);
}
24 
/*
 * L2 guest code: announce that L2 is running, then vmcall() to exit back to
 * L1 (observed as SVM_EXIT_VMMCALL in l1_vmexit()).
 */
static void l2_code(void)
{
	GUEST_SYNC(SYNC_L2_STARTED);
	vmcall();
}
30 
/*
 * L1 guest body: set up nested state, then VMRUN with the caller-supplied
 * vmcb12 GPA.  VMRUN takes the VMCB address in rAX, hence the "a" constraint.
 * Tests pass either an invalid GPA (expecting #GP) or an unmappable one.
 */
static void l1_vmrun(struct svm_test_data *svm, u64 gpa)
{
	generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	asm volatile ("vmrun %[gpa]" : : [gpa] "a" (gpa) : "memory");
}
37 
/*
 * L1 guest body: VMLOAD from the caller-supplied GPA (address in rAX).
 * Tests pass either an invalid GPA (expecting #GP) or an unmappable one.
 */
static void l1_vmload(struct svm_test_data *svm, u64 gpa)
{
	generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	asm volatile ("vmload %[gpa]" : : [gpa] "a" (gpa) : "memory");
}
44 
/*
 * L1 guest body: VMSAVE to the caller-supplied GPA (address in rAX).
 * Tests pass either an invalid GPA (expecting #GP) or an unmappable one.
 */
static void l1_vmsave(struct svm_test_data *svm, u64 gpa)
{
	generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	asm volatile ("vmsave %[gpa]" : : [gpa] "a" (gpa) : "memory");
}
51 
/*
 * L1 guest body for the nested #VMEXIT test: enter L2 with a valid vmcb12
 * and verify L2's vmcall() caused the nested exit.  @gpa is accepted only to
 * match the common signature and is deliberately unused; the host corrupts
 * the vmcb12 GPA via KVM_SET_NESTED_STATE (see test_unmappable_vmcb12_vmexit()).
 */
static void l1_vmexit(struct svm_test_data *svm, u64 gpa)
{
	generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_DONE();
}
60 
61 static u64 unmappable_gpa(struct kvm_vcpu *vcpu)
62 {
63 	struct userspace_mem_region *region;
64 	u64 region_gpa_end, vm_gpa_end = 0;
65 	int i;
66 
67 	hash_for_each(vcpu->vm->regions.slot_hash, i, region, slot_node) {
68 		region_gpa_end = region->region.guest_phys_addr + region->region.memory_size;
69 		vm_gpa_end = max(vm_gpa_end, region_gpa_end);
70 	}
71 
72 	return vm_gpa_end;
73 }
74 
75 static void test_invalid_vmcb12(struct kvm_vcpu *vcpu)
76 {
77 	vm_vaddr_t nested_gva = 0;
78 	struct ucall uc;
79 
80 
81 	vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
82 	vcpu_alloc_svm(vcpu->vm, &nested_gva);
83 	vcpu_args_set(vcpu, 2, nested_gva, -1ULL);
84 	vcpu_run(vcpu);
85 
86 	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
87 	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
88 	TEST_ASSERT_EQ(uc.args[1], SYNC_GP);
89 }
90 
91 static void test_unmappable_vmcb12(struct kvm_vcpu *vcpu)
92 {
93 	vm_vaddr_t nested_gva = 0;
94 
95 	vcpu_alloc_svm(vcpu->vm, &nested_gva);
96 	vcpu_args_set(vcpu, 2, nested_gva, unmappable_gpa(vcpu));
97 	vcpu_run(vcpu);
98 
99 	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
100 	TEST_ASSERT_EQ(vcpu->run->emulation_failure.suberror, KVM_INTERNAL_ERROR_EMULATION);
101 }
102 
/* Verify that an unmappable vmcb12 GPA at nested VM-Exit shuts the VM down. */
static void test_unmappable_vmcb12_vmexit(struct kvm_vcpu *vcpu)
{
	struct kvm_x86_state *state;
	vm_vaddr_t nested_gva = 0;
	struct ucall uc;

	/*
	 * Enter L2 (with a legit vmcb12 GPA), then overwrite vmcb12 GPA with an
	 * unmappable GPA. KVM will fail to map vmcb12 on nested VM-Exit and
	 * cause a shutdown.
	 */
	vcpu_alloc_svm(vcpu->vm, &nested_gva);
	vcpu_args_set(vcpu, 2, nested_gva, unmappable_gpa(vcpu));
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
	TEST_ASSERT_EQ(uc.args[1], SYNC_L2_STARTED);

	/*
	 * Round-trip the nested state with a corrupted vmcb12 GPA; the next
	 * run resumes L2, and L2's vmcall() forces the failing nested exit.
	 */
	state = vcpu_save_state(vcpu);
	state->nested.hdr.svm.vmcb_pa = unmappable_gpa(vcpu);
	vcpu_load_state(vcpu, state);
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);

	kvm_x86_state_cleanup(state);
}
129 
130 KVM_ONE_VCPU_TEST_SUITE(vmcb12_gpa);
131 
/* VMRUN with an invalid (-1) vmcb12 GPA must #GP in L1. */
KVM_ONE_VCPU_TEST(vmcb12_gpa, vmrun_invalid, l1_vmrun)
{
	test_invalid_vmcb12(vcpu);
}
136 
/* VMLOAD with an invalid (-1) GPA must #GP in L1. */
KVM_ONE_VCPU_TEST(vmcb12_gpa, vmload_invalid, l1_vmload)
{
	test_invalid_vmcb12(vcpu);
}
141 
/* VMSAVE with an invalid (-1) GPA must #GP in L1. */
KVM_ONE_VCPU_TEST(vmcb12_gpa, vmsave_invalid, l1_vmsave)
{
	test_invalid_vmcb12(vcpu);
}
146 
/* VMRUN with an unmappable vmcb12 GPA must fail with an emulation error. */
KVM_ONE_VCPU_TEST(vmcb12_gpa, vmrun_unmappable, l1_vmrun)
{
	test_unmappable_vmcb12(vcpu);
}
151 
/* VMLOAD with an unmappable GPA must fail with an emulation error. */
KVM_ONE_VCPU_TEST(vmcb12_gpa, vmload_unmappable, l1_vmload)
{
	test_unmappable_vmcb12(vcpu);
}
156 
/* VMSAVE with an unmappable GPA must fail with an emulation error. */
KVM_ONE_VCPU_TEST(vmcb12_gpa, vmsave_unmappable, l1_vmsave)
{
	test_unmappable_vmcb12(vcpu);
}
161 
/*
 * An invalid vmcb12_gpa cannot be tested for #VMEXIT, as KVM_SET_NESTED_STATE
 * will reject it.  Only the unmappable-GPA case is reachable here.
 */
KVM_ONE_VCPU_TEST(vmcb12_gpa, vmexit_unmappable, l1_vmexit)
{
	test_unmappable_vmcb12_vmexit(vcpu);
}
170 
int main(int argc, char *argv[])
{
	/* Every test in the suite exercises nested SVM; skip without it. */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));

	return test_harness_run(argc, argv);
}
177