xref: /linux/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c (revision c34e9ab9a612ee8b18273398ef75c207b01f516d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <fcntl.h>
3 #include <stdio.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/ioctl.h>
7 #include <math.h>
8 
9 #include "test_util.h"
10 #include "kvm_util.h"
11 #include "processor.h"
12 #include "svm_util.h"
13 #include "linux/psp-sev.h"
14 #include "sev.h"
15 
16 
17 #define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)
18 
/*
 * Guest payload for SEV-ES VMs: verify both the SEV and SEV-ES enabled bits
 * in MSR_AMD64_SEV, then terminate via the GHCB MSR protocol (SEV-ES guests
 * have no ucall support yet, so termination doubles as the "done" signal).
 */
static void guest_sev_es_code(void)
{
	/* TODO: Check CPUID after GHCB-based hypercall support is added. */
	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);

	/*
	 * TODO: Add GHCB and ucall support for SEV-ES guests.  For now, simply
	 * force "termination" to signal "done" via the GHCB MSR protocol.
	 */
	wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
	/* "rep; vmmcall" is VMGEXIT, which hands control to the hypervisor. */
	__asm__ __volatile__("rep; vmmcall");
}
32 
33 static void guest_sev_code(void)
34 {
35 	GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));
36 	GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
37 
38 	GUEST_DONE();
39 }
40 
/* Stash state passed via VMSA before any compiled code runs.  */
extern void guest_code_xsave(void);
/*
 * Hand-written entry stub: EDX:EAX selects the XSAVE feature mask
 * (x87|SSE|AVX, EDX zeroed), RDI carries the guest buffer address set up by
 * vcpu_args_set(), and XSAVE dumps the incoming register state there before
 * any compiled code can clobber it.  Control then falls through to the
 * normal SEV-ES guest payload.
 */
asm("guest_code_xsave:\n"
    "mov $" __stringify(XFEATURE_MASK_X87_AVX) ", %eax\n"
    "xor %edx, %edx\n"
    "xsave (%rdi)\n"
    "jmp guest_sev_es_code");
49 static void compare_xsave(u8 *from_host, u8 *from_guest)
50 {
51 	int i;
52 	bool bad = false;
53 	for (i = 0; i < 4095; i++) {
54 		if (from_host[i] != from_guest[i]) {
55 			printf("mismatch at %02hhx | %02hhx %02hhx\n", i, from_host[i], from_guest[i]);
56 			bad = true;
57 		}
58 	}
59 
60 	if (bad)
61 		abort();
62 }
63 
64 static void test_sync_vmsa(uint32_t policy)
65 {
66 	struct kvm_vcpu *vcpu;
67 	struct kvm_vm *vm;
68 	vm_vaddr_t gva;
69 	void *hva;
70 
71 	double x87val = M_PI;
72 	struct kvm_xsave __attribute__((aligned(64))) xsave = { 0 };
73 
74 	vm = vm_sev_create_with_one_vcpu(KVM_X86_SEV_ES_VM, guest_code_xsave, &vcpu);
75 	gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR,
76 				    MEM_REGION_TEST_DATA);
77 	hva = addr_gva2hva(vm, gva);
78 
79 	vcpu_args_set(vcpu, 1, gva);
80 
81 	asm("fninit\n"
82 	    "vpcmpeqb %%ymm4, %%ymm4, %%ymm4\n"
83 	    "fldl %3\n"
84 	    "xsave (%2)\n"
85 	    "fstp %%st\n"
86 	    : "=m"(xsave)
87 	    : "A"(XFEATURE_MASK_X87_AVX), "r"(&xsave), "m" (x87val)
88 	    : "ymm4", "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)");
89 	vcpu_xsave_set(vcpu, &xsave);
90 
91 	vm_sev_launch(vm, SEV_POLICY_ES | policy, NULL);
92 
93 	/* This page is shared, so make it decrypted.  */
94 	memset(hva, 0, 4096);
95 
96 	vcpu_run(vcpu);
97 
98 	TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
99 		    "Wanted SYSTEM_EVENT, got %s",
100 		    exit_reason_str(vcpu->run->exit_reason));
101 	TEST_ASSERT_EQ(vcpu->run->system_event.type, KVM_SYSTEM_EVENT_SEV_TERM);
102 	TEST_ASSERT_EQ(vcpu->run->system_event.ndata, 1);
103 	TEST_ASSERT_EQ(vcpu->run->system_event.data[0], GHCB_MSR_TERM_REQ);
104 
105 	compare_xsave((u8 *)&xsave, (u8 *)hva);
106 
107 	kvm_vm_free(vm);
108 }
109 
110 static void test_sev(void *guest_code, uint64_t policy)
111 {
112 	struct kvm_vcpu *vcpu;
113 	struct kvm_vm *vm;
114 	struct ucall uc;
115 
116 	uint32_t type = policy & SEV_POLICY_ES ? KVM_X86_SEV_ES_VM : KVM_X86_SEV_VM;
117 
118 	vm = vm_sev_create_with_one_vcpu(type, guest_code, &vcpu);
119 
120 	/* TODO: Validate the measurement is as expected. */
121 	vm_sev_launch(vm, policy, NULL);
122 
123 	for (;;) {
124 		vcpu_run(vcpu);
125 
126 		if (policy & SEV_POLICY_ES) {
127 			TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
128 				    "Wanted SYSTEM_EVENT, got %s",
129 				    exit_reason_str(vcpu->run->exit_reason));
130 			TEST_ASSERT_EQ(vcpu->run->system_event.type, KVM_SYSTEM_EVENT_SEV_TERM);
131 			TEST_ASSERT_EQ(vcpu->run->system_event.ndata, 1);
132 			TEST_ASSERT_EQ(vcpu->run->system_event.data[0], GHCB_MSR_TERM_REQ);
133 			break;
134 		}
135 
136 		switch (get_ucall(vcpu, &uc)) {
137 		case UCALL_SYNC:
138 			continue;
139 		case UCALL_DONE:
140 			return;
141 		case UCALL_ABORT:
142 			REPORT_GUEST_ASSERT(uc);
143 		default:
144 			TEST_FAIL("Unexpected exit: %s",
145 				  exit_reason_str(vcpu->run->exit_reason));
146 		}
147 	}
148 
149 	kvm_vm_free(vm);
150 }
151 
/*
 * Guest payload that forces a SHUTDOWN: load a zero-limit IDT so the #UD
 * raised by ud2 cannot be delivered, escalating to a shutdown condition the
 * host must observe as KVM_EXIT_SHUTDOWN.
 */
static void guest_shutdown_code(void)
{
	struct desc_ptr idt;

	/* Clobber the IDT so that #UD is guaranteed to trigger SHUTDOWN. */
	memset(&idt, 0, sizeof(idt));
	__asm__ __volatile__("lidt %0" :: "m"(idt));

	/* ud2 raises #UD, which with no usable IDT cannot be delivered. */
	__asm__ __volatile__("ud2");
}
162 
163 static void test_sev_es_shutdown(void)
164 {
165 	struct kvm_vcpu *vcpu;
166 	struct kvm_vm *vm;
167 
168 	uint32_t type = KVM_X86_SEV_ES_VM;
169 
170 	vm = vm_sev_create_with_one_vcpu(type, guest_shutdown_code, &vcpu);
171 
172 	vm_sev_launch(vm, SEV_POLICY_ES, NULL);
173 
174 	vcpu_run(vcpu);
175 	TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SHUTDOWN,
176 		    "Wanted SHUTDOWN, got %s",
177 		    exit_reason_str(vcpu->run->exit_reason));
178 
179 	kvm_vm_free(vm);
180 }
181 
182 int main(int argc, char *argv[])
183 {
184 	const u64 xf_mask = XFEATURE_MASK_X87_AVX;
185 
186 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));
187 
188 	test_sev(guest_sev_code, SEV_POLICY_NO_DBG);
189 	test_sev(guest_sev_code, 0);
190 
191 	if (kvm_cpu_has(X86_FEATURE_SEV_ES)) {
192 		test_sev(guest_sev_es_code, SEV_POLICY_ES | SEV_POLICY_NO_DBG);
193 		test_sev(guest_sev_es_code, SEV_POLICY_ES);
194 
195 		test_sev_es_shutdown();
196 
197 		if (kvm_has_cap(KVM_CAP_XCRS) &&
198 		    (xgetbv(0) & kvm_cpu_supported_xcr0() & xf_mask) == xf_mask) {
199 			test_sync_vmsa(0);
200 			test_sync_vmsa(SEV_POLICY_NO_DBG);
201 		}
202 	}
203 
204 	return 0;
205 }
206