1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <fcntl.h>
3 #include <stdio.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/ioctl.h>
7 #include <math.h>
8
9 #include "test_util.h"
10 #include "kvm_util.h"
11 #include "processor.h"
12 #include "svm_util.h"
13 #include "linux/psp-sev.h"
14 #include "sev.h"
15
/*
 * Read an MSR, write the identical value back, and verify the readback
 * matches — a state-preserving round-trip through the MSR intercepts.
 */
static void guest_sev_test_msr(uint32_t msr)
{
	uint64_t orig = rdmsr(msr);

	wrmsr(msr, orig);
	GUEST_ASSERT(rdmsr(msr) == orig);
}
23
/*
 * Round-trip a control register: read via get_<reg>(), write the same value
 * back via set_<reg>(), and assert the readback is unchanged.  A macro so the
 * accessor pair can be selected by token pasting on the register name.
 */
#define guest_sev_test_reg(reg) \
do { \
	uint64_t val = get_##reg(); \
	\
	set_##reg(val); \
	GUEST_ASSERT(val == get_##reg()); \
} while (0)
31
/* Round-trip EFER and the control registers writable from the guest. */
static void guest_sev_test_regs(void)
{
	guest_sev_test_msr(MSR_EFER);
	guest_sev_test_reg(cr0);
	guest_sev_test_reg(cr3);
	guest_sev_test_reg(cr4);
	guest_sev_test_reg(cr8);
}
40
/* XSAVE feature bits for legacy x87/SSE state plus the AVX YMM component. */
#define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)
42
guest_snp_code(void)43 static void guest_snp_code(void)
44 {
45 uint64_t sev_msr = rdmsr(MSR_AMD64_SEV);
46
47 GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ENABLED);
48 GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED);
49 GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_SNP_ENABLED);
50
51 guest_sev_test_regs();
52
53 wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
54 vmgexit();
55 }
56
guest_sev_es_code(void)57 static void guest_sev_es_code(void)
58 {
59 /* TODO: Check CPUID after GHCB-based hypercall support is added. */
60 GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
61 GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);
62
63 guest_sev_test_regs();
64
65 /*
66 * TODO: Add GHCB and ucall support for SEV-ES guests. For now, simply
67 * force "termination" to signal "done" via the GHCB MSR protocol.
68 */
69 wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
70 vmgexit();
71 }
72
/*
 * Plain SEV guest body: verify SEV is advertised via CPUID and enabled in the
 * status MSR, round-trip registers, then exit normally via ucall (plain SEV
 * guests, unlike ES/SNP, have working ucall support).
 */
static void guest_sev_code(void)
{
	GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));
	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);

	guest_sev_test_regs();

	GUEST_DONE();
}
82
/* Stash state passed via VMSA before any compiled code runs. */
extern void guest_code_xsave(void);
asm("guest_code_xsave:\n"
    /* XSAVE takes the requested-feature bitmap in EDX:EAX. */
    "mov $" __stringify(XFEATURE_MASK_X87_AVX) ", %eax\n"
    "xor %edx, %edx\n"
    /* RDI = guest-virtual save buffer, passed in via vcpu_args_set(). */
    "xsave (%rdi)\n"
    "jmp guest_sev_es_code");
90
/*
 * Byte-wise compare of the host- and guest-captured XSAVE buffers, printing
 * every mismatching offset before aborting so a failure shows the full
 * extent of any divergence.
 *
 * NOTE(review): compares 4095 bytes, not the full 4096-byte XSAVE area —
 * confirm the final byte is intentionally ignored.
 */
static void compare_xsave(u8 *from_host, u8 *from_guest)
{
	bool mismatch = false;
	int i;

	for (i = 0; i < 4095; i++) {
		if (from_host[i] == from_guest[i])
			continue;

		printf("mismatch at %u | %02hhx %02hhx\n",
		       i, from_host[i], from_guest[i]);
		mismatch = true;
	}

	if (mismatch)
		abort();
}
106
/*
 * Verify FPU/AVX state set from the host via KVM_SET_XSAVE before launch is
 * propagated into the guest's VMSA: prime x87/YMM state on the host, push it
 * into the vCPU, have the guest XSAVE into shared memory, and compare.
 */
static void test_sync_vmsa(uint32_t type, uint64_t policy)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t gva;
	void *hva;

	double x87val = M_PI;
	/* XSAVE requires a 64-byte-aligned save area. */
	struct kvm_xsave __attribute__((aligned(64))) xsave = { 0 };

	vm = vm_sev_create_with_one_vcpu(type, guest_code_xsave, &vcpu);
	gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR,
				    MEM_REGION_TEST_DATA);
	hva = addr_gva2hva(vm, gva);

	/* The guest XSAVEs into this buffer; the GVA arrives in RDI. */
	vcpu_args_set(vcpu, 1, gva);

	/*
	 * Load a recognizable x87 value (pi) and all-ones YMM4, snapshot the
	 * state with XSAVE, then copy it into the vCPU so it lands in the
	 * VMSA at launch.
	 */
	asm("fninit\n"
	    "vpcmpeqb %%ymm4, %%ymm4, %%ymm4\n"
	    "fldl %3\n"
	    "xsave (%2)\n"
	    "fstp %%st\n"
	    : "=m"(xsave)
	    : "A"(XFEATURE_MASK_X87_AVX), "r"(&xsave), "m" (x87val)
	    : "ymm4", "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)");
	vcpu_xsave_set(vcpu, &xsave);

	vm_sev_launch(vm, policy, NULL);

	/* This page is shared, so make it decrypted. */
	memset(hva, 0, PAGE_SIZE);

	vcpu_run(vcpu);

	/* The guest signals completion via a GHCB MSR termination request. */
	TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
		    "Wanted SYSTEM_EVENT, got %s",
		    exit_reason_str(vcpu->run->exit_reason));
	TEST_ASSERT_EQ(vcpu->run->system_event.type, KVM_SYSTEM_EVENT_SEV_TERM);
	TEST_ASSERT_EQ(vcpu->run->system_event.ndata, 1);
	TEST_ASSERT_EQ(vcpu->run->system_event.data[0], GHCB_MSR_TERM_REQ);

	compare_xsave((u8 *)&xsave, (u8 *)hva);

	kvm_vm_free(vm);
}
152
test_sev(void * guest_code,uint32_t type,uint64_t policy)153 static void test_sev(void *guest_code, uint32_t type, uint64_t policy)
154 {
155 struct kvm_vcpu *vcpu;
156 struct kvm_vm *vm;
157 struct ucall uc;
158
159 vm = vm_sev_create_with_one_vcpu(type, guest_code, &vcpu);
160
161 /* TODO: Validate the measurement is as expected. */
162 vm_sev_launch(vm, policy, NULL);
163
164 for (;;) {
165 vcpu_run(vcpu);
166
167 if (is_sev_es_vm(vm)) {
168 TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
169 "Wanted SYSTEM_EVENT, got %s",
170 exit_reason_str(vcpu->run->exit_reason));
171 TEST_ASSERT_EQ(vcpu->run->system_event.type, KVM_SYSTEM_EVENT_SEV_TERM);
172 TEST_ASSERT_EQ(vcpu->run->system_event.ndata, 1);
173 TEST_ASSERT_EQ(vcpu->run->system_event.data[0], GHCB_MSR_TERM_REQ);
174 break;
175 }
176
177 switch (get_ucall(vcpu, &uc)) {
178 case UCALL_SYNC:
179 continue;
180 case UCALL_DONE:
181 return;
182 case UCALL_ABORT:
183 REPORT_GUEST_ASSERT(uc);
184 default:
185 TEST_FAIL("Unexpected exit: %s",
186 exit_reason_str(vcpu->run->exit_reason));
187 }
188 }
189
190 kvm_vm_free(vm);
191 }
192
/*
 * Guest that wipes its IDT and executes an undefined instruction; with no
 * usable exception handlers the resulting fault escalates to SHUTDOWN.
 */
static void guest_shutdown_code(void)
{
	struct desc_ptr idt;

	/* Clobber the IDT so that #UD is guaranteed to trigger SHUTDOWN. */
	memset(&idt, 0, sizeof(idt));
	set_idt(&idt);

	__asm__ __volatile__("ud2");
}
203
test_sev_shutdown(uint32_t type,uint64_t policy)204 static void test_sev_shutdown(uint32_t type, uint64_t policy)
205 {
206 struct kvm_vcpu *vcpu;
207 struct kvm_vm *vm;
208
209 vm = vm_sev_create_with_one_vcpu(type, guest_shutdown_code, &vcpu);
210
211 vm_sev_launch(vm, policy, NULL);
212
213 vcpu_run(vcpu);
214 TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SHUTDOWN,
215 "Wanted SHUTDOWN, got %s",
216 exit_reason_str(vcpu->run->exit_reason));
217
218 kvm_vm_free(vm);
219 }
220
/*
 * Smoke-test one SEV VM type: run the guest with and without the debuggable
 * policy variant, then (for ES/SNP types only) exercise shutdown reporting
 * and host=>VMSA state synchronization.
 */
static void test_sev_smoke(void *guest, uint32_t type, uint64_t policy)
{
	const u64 xf_mask = XFEATURE_MASK_X87_AVX;

	/* Debug policy polarity differs: SNP sets DBG, legacy SEV sets NO_DBG. */
	if (type == KVM_X86_SNP_VM)
		test_sev(guest, type, policy | SNP_POLICY_DBG);
	else
		test_sev(guest, type, policy | SEV_POLICY_NO_DBG);
	test_sev(guest, type, policy);

	/* The shutdown and VMSA-sync tests below apply only to ES/SNP types. */
	if (type == KVM_X86_SEV_VM)
		return;

	test_sev_shutdown(type, policy);

	/* VMSA sync requires KVM_CAP_XCRS and host XCR0 covering x87/SSE/AVX. */
	if (kvm_has_cap(KVM_CAP_XCRS) &&
	    (xgetbv(0) & kvm_cpu_supported_xcr0() & xf_mask) == xf_mask) {
		test_sync_vmsa(type, policy);
		if (type == KVM_X86_SNP_VM)
			test_sync_vmsa(type, policy | SNP_POLICY_DBG);
		else
			test_sync_vmsa(type, policy | SEV_POLICY_NO_DBG);
	}
}
245
int main(int argc, char *argv[])
{
	/* Base SEV support is mandatory; ES and SNP are tested if present. */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));

	test_sev_smoke(guest_sev_code, KVM_X86_SEV_VM, 0);

	if (kvm_cpu_has(X86_FEATURE_SEV_ES))
		test_sev_smoke(guest_sev_es_code, KVM_X86_SEV_ES_VM, SEV_POLICY_ES);

	if (kvm_cpu_has(X86_FEATURE_SEV_SNP))
		test_sev_smoke(guest_snp_code, KVM_X86_SNP_VM, snp_default_policy());

	return 0;
}
260