xref: /linux/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2026, Google LLC.
4  */
5 #include "kvm_util.h"
6 #include "vmx.h"
7 #include "svm_util.h"
8 #include "kselftest.h"
9 
10 /*
11  * Allocate two VMCB pages for testing. Both pages have different GVAs (shared
12  * by both L1 and L2) and L1 GPAs. A single L2 GPA is used such that:
13  * - L2 GPA == L1 GPA for VMCB0.
14  * - L2 GPA is mapped to L1 GPA for VMCB1 using NPT in L1.
15  *
 * This allows testing whether the GPA used by VMSAVE/VMLOAD in L2 is
 * interpreted as a direct L1 GPA or translated using NPT as an L2 GPA,
 * depending on which VMCB is accessed.
 */
19  */
/* Memslot backing the two test VMCB pages (slot 0 is the default memslot). */
#define TEST_MEM_SLOT_INDEX		1
#define TEST_MEM_PAGES			2
#define TEST_MEM_BASE			0xc0000000

/* Address of test page 'idx' (0 or 1) within the test memslot. */
#define TEST_GUEST_ADDR(idx)		(TEST_MEM_BASE + (idx) * PAGE_SIZE)

/* GVAs and L1 GPAs of the VMCB pages are identity mapped (see virt_map()). */
#define TEST_VMCB_L1_GPA(idx)		TEST_GUEST_ADDR(idx)
#define TEST_VMCB_GVA(idx)		TEST_GUEST_ADDR(idx)

/* The single L2 GPA used by VMSAVE/VMLOAD in L2, aliases VMCB0's L1 GPA. */
#define TEST_VMCB_L2_GPA		TEST_VMCB_L1_GPA(0)

#define L2_GUEST_STACK_SIZE		64
32 
/*
 * Execute VMSAVE from L2 targeting TEST_VMCB_L2_GPA.  Which physical page the
 * save lands in (VMCB0 or VMCB1) depends on whether the CPU treats the GPA as
 * an L1 GPA or translates it through the NPT.
 */
static void l2_guest_code_vmsave(void)
{
	/* VMSAVE takes the VMCB physical address in RAX. */
	asm volatile("vmsave %0" : : "a"(TEST_VMCB_L2_GPA) : "memory");
}
37 
/*
 * Execute VMLOAD from L2 targeting TEST_VMCB_L2_GPA, loading state (including
 * KERNEL_GS_BASE) from whichever physical VMCB page the CPU resolves the GPA
 * to.
 */
static void l2_guest_code_vmload(void)
{
	/* VMLOAD takes the VMCB physical address in RAX. */
	asm volatile("vmload %0" : : "a"(TEST_VMCB_L2_GPA) : "memory");
}
42 
/*
 * L2 test body: VMSAVE with KERNEL_GS_BASE == 0xaaaa, sync to the host so it
 * can verify the save hit the expected VMCB page (vmcb_idx) and rewrite the
 * saved KERNEL_GS_BASE to 0xbbbb, then VMLOAD and assert 0xbbbb was loaded —
 * proving VMLOAD resolved the GPA to the same physical page as VMSAVE.
 */
static void l2_guest_code_vmcb(int vmcb_idx)
{
	wrmsr(MSR_KERNEL_GS_BASE, 0xaaaa);
	l2_guest_code_vmsave();

	/* Verify the VMCB used by VMSAVE and update KERNEL_GS_BASE to 0xbbbb */
	GUEST_SYNC(vmcb_idx);

	l2_guest_code_vmload();
	GUEST_ASSERT_EQ(rdmsr(MSR_KERNEL_GS_BASE), 0xbbbb);

	/*
	 * Reset MSR_KERNEL_GS_BASE, both in the MSR and (via VMSAVE) in the
	 * VMCB page, so the next test case starts from a clean slate.
	 */
	wrmsr(MSR_KERNEL_GS_BASE, 0);
	l2_guest_code_vmsave();

	vmmcall();
}
60 
/* L2 entry point for the case where VMSAVE/VMLOAD should target VMCB0. */
static void l2_guest_code_vmcb0(void)
{
	l2_guest_code_vmcb(0);
}
65 
/* L2 entry point for the case where VMSAVE/VMLOAD should target VMCB1. */
static void l2_guest_code_vmcb1(void)
{
	l2_guest_code_vmcb(1);
}
70 
/*
 * L1 guest: first verify the VMSAVE/VMLOAD intercepts fire regardless of
 * VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK, then clear the intercepts and run the
 * real test cases to check which VMCB page L2's VMSAVE/VMLOAD actually hits.
 */
static void l1_guest_code(struct svm_test_data *svm)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	/* Each test case initializes the guest RIP below */
	generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/* Set VMSAVE/VMLOAD intercepts and make sure they work with.. */
	svm->vmcb->control.intercept |= (BIT_ULL(INTERCEPT_VMSAVE) |
					 BIT_ULL(INTERCEPT_VMLOAD));

	/* ..VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK cleared.. */
	svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	svm->vmcb->save.rip = (u64)l2_guest_code_vmsave;
	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMSAVE);

	svm->vmcb->save.rip = (u64)l2_guest_code_vmload;
	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMLOAD);

	/* ..and VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK set */
	svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	svm->vmcb->save.rip = (u64)l2_guest_code_vmsave;
	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMSAVE);

	svm->vmcb->save.rip = (u64)l2_guest_code_vmload;
	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMLOAD);

	/* Now clear the intercepts to test VMSAVE/VMLOAD behavior */
	svm->vmcb->control.intercept &= ~(BIT_ULL(INTERCEPT_VMSAVE) |
					  BIT_ULL(INTERCEPT_VMLOAD));

	/*
	 * Without VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK, the GPA will be
	 * interpreted as an L1 GPA, so VMCB0 should be used.
	 */
	svm->vmcb->save.rip = (u64)l2_guest_code_vmcb0;
	svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMMCALL);

	/*
	 * With VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK, the GPA will be interpreted
	 * as an L2 GPA, and translated through the NPT to VMCB1.
	 */
	svm->vmcb->save.rip = (u64)l2_guest_code_vmcb1;
	svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMMCALL);

	GUEST_DONE();
}
128 
int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;
	struct vmcb *test_vmcb[2];	/* host mappings of the two VMCB pages */
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int i;

	/* The test relies on nested SVM with NPT and virtual VMSAVE/VMLOAD. */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_NPT));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
	vm_enable_tdp(vm);

	vcpu_alloc_svm(vm, &nested_gva);
	vcpu_args_set(vcpu, 1, nested_gva);

	/* Back the two test VMCB pages with a dedicated memslot. */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    TEST_MEM_BASE, TEST_MEM_SLOT_INDEX,
				    TEST_MEM_PAGES, 0);

	/* Identity map each VMCB page in L1 and grab its host address. */
	for (i = 0; i <= 1; i++) {
		virt_map(vm, TEST_VMCB_GVA(i), TEST_VMCB_L1_GPA(i), 1);
		test_vmcb[i] = (struct vmcb *)addr_gva2hva(vm, TEST_VMCB_GVA(i));
	}

	tdp_identity_map_default_memslots(vm);

	/*
	 * L2 GPA == L1_GPA(0), but map it to L1_GPA(1), to allow testing
	 * whether the L2 GPA is interpreted as an L1 GPA or translated through
	 * the NPT.
	 */
	TEST_ASSERT_EQ(TEST_VMCB_L2_GPA, TEST_VMCB_L1_GPA(0));
	tdp_map(vm, TEST_VMCB_L2_GPA, TEST_VMCB_L1_GPA(1), PAGE_SIZE);

	for (;;) {
		struct ucall uc;

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			/* The guest passes the expected VMCB index (0 or 1). */
			i = uc.args[1];
			TEST_ASSERT(i == 0 || i == 1, "Unexpected VMCB idx: %d", i);

			/*
			 * Check that only the expected VMCB has KERNEL_GS_BASE
			 * set to 0xaaaa, and update it to 0xbbbb.
			 */
			TEST_ASSERT_EQ(test_vmcb[i]->save.kernel_gs_base, 0xaaaa);
			TEST_ASSERT_EQ(test_vmcb[1-i]->save.kernel_gs_base, 0);
			test_vmcb[i]->save.kernel_gs_base = 0xbbbb;
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	kvm_vm_free(vm);
	return 0;
}
198