1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kvm.h>
3 #include <linux/psp-sev.h>
4 #include <stdio.h>
5 #include <sys/ioctl.h>
6 #include <stdlib.h>
7 #include <errno.h>
8 #include <pthread.h>
9
10 #include "test_util.h"
11 #include "kvm_util.h"
12 #include "processor.h"
13 #include "sev.h"
14 #include "kselftest.h"
15
16 #define NR_MIGRATE_TEST_VCPUS 4
17 #define NR_MIGRATE_TEST_VMS 3
18 #define NR_LOCK_TESTING_THREADS 3
19 #define NR_LOCK_TESTING_ITERATIONS 10000
20
/* Whether the host supports SEV-ES; set once in main() from X86_FEATURE_SEV_ES. */
bool have_sev_es;
22
/*
 * Create a barebones SEV or SEV-ES VM with NR_MIGRATE_TEST_VCPUS vCPUs,
 * take it through LAUNCH_START, and (for SEV-ES) encrypt the VMSAs so the
 * VM is a fully valid migration source.
 */
static struct kvm_vm *sev_vm_create(bool es)
{
	struct kvm_vm *vm = vm_create_barebones();
	int vcpu_id;

	/* Pick the init helper matching plain SEV vs. SEV-ES. */
	(es ? sev_es_vm_init : sev_vm_init)(vm);

	for (vcpu_id = 0; vcpu_id < NR_MIGRATE_TEST_VCPUS; vcpu_id++)
		__vm_vcpu_add(vm, vcpu_id);

	sev_vm_launch(vm, es ? SEV_POLICY_ES : 0);

	/* SEV-ES guests additionally need their VMSAs encrypted. */
	if (es)
		vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);

	return vm;
}
43
/*
 * Create a plain (non-SEV) helper VM to act as a migration/mirror target,
 * optionally populated with NR_MIGRATE_TEST_VCPUS vCPUs.
 */
static struct kvm_vm *aux_vm_create(bool with_vcpus)
{
	struct kvm_vm *vm = vm_create_barebones();
	int vcpu_id;

	if (with_vcpus) {
		for (vcpu_id = 0; vcpu_id < NR_MIGRATE_TEST_VCPUS; vcpu_id++)
			__vm_vcpu_add(vm, vcpu_id);
	}

	return vm;
}
58
/*
 * Ask KVM to move the SEV encryption context out of @src and into @dst via
 * KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM.  Returns the raw result (0 on success,
 * -1 with errno set on failure) so callers can exercise both success and
 * expected-failure paths.
 */
static int __sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
	return __vm_enable_cap(dst, KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM, src->fd);
}
63
64
/* Migrate the SEV context from @src into @dst, asserting the ioctl succeeds. */
static void sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
	int rc = __sev_migrate_from(dst, src);

	TEST_ASSERT(!rc, "Migration failed, ret: %d, errno: %d", rc, errno);
}
72
/*
 * Daisy-chain an SEV(-ES) guest's encryption context through
 * NR_MIGRATE_TEST_VMS destination VMs, then verify that migrating out of
 * the now-dead original source fails with EIO.
 */
static void test_sev_migrate_from(bool es)
{
	struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
	struct kvm_vm *src_vm = sev_vm_create(es);
	int ret, idx;

	for (idx = 0; idx < NR_MIGRATE_TEST_VMS; idx++)
		dst_vms[idx] = aux_vm_create(true);

	/* Initial migration from the src to the first dst. */
	sev_migrate_from(dst_vms[0], src_vm);

	/* Hand the context down the chain of destinations. */
	for (idx = 1; idx < NR_MIGRATE_TEST_VMS; idx++)
		sev_migrate_from(dst_vms[idx], dst_vms[idx - 1]);

	/* Migrate the guest back to the original VM. */
	ret = __sev_migrate_from(src_vm, dst_vms[NR_MIGRATE_TEST_VMS - 1]);
	TEST_ASSERT(ret == -1 && errno == EIO,
		    "VM that was migrated from should be dead. ret %d, errno: %d", ret,
		    errno);

	kvm_vm_free(src_vm);
	for (idx = 0; idx < NR_MIGRATE_TEST_VMS; idx++)
		kvm_vm_free(dst_vms[idx]);
}
99
/* Arguments handed to each locking_test_thread(). */
struct locking_thread_input {
	/* Destination VM this thread migrates into. */
	struct kvm_vm *vm;
	/* Pool of source VMs shared by all threads. */
	struct kvm_vm *source_vms[NR_LOCK_TESTING_THREADS];
};
104
locking_test_thread(void * arg)105 static void *locking_test_thread(void *arg)
106 {
107 int i, j;
108 struct locking_thread_input *input = (struct locking_thread_input *)arg;
109
110 for (i = 0; i < NR_LOCK_TESTING_ITERATIONS; ++i) {
111 j = i % NR_LOCK_TESTING_THREADS;
112 __sev_migrate_from(input->vm, input->source_vms[j]);
113 }
114
115 return NULL;
116 }
117
test_sev_migrate_locking(void)118 static void test_sev_migrate_locking(void)
119 {
120 struct locking_thread_input input[NR_LOCK_TESTING_THREADS];
121 pthread_t pt[NR_LOCK_TESTING_THREADS];
122 int i;
123
124 for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) {
125 input[i].vm = sev_vm_create(/* es= */ false);
126 input[0].source_vms[i] = input[i].vm;
127 }
128 for (i = 1; i < NR_LOCK_TESTING_THREADS; ++i)
129 memcpy(input[i].source_vms, input[0].source_vms,
130 sizeof(input[i].source_vms));
131
132 for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
133 pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
134
135 for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
136 pthread_join(pt[i], NULL);
137 for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
138 kvm_vm_free(input[i].vm);
139 }
140
/*
 * Negative tests for KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: every invalid
 * source/destination combination must fail with -1/EINVAL.
 */
static void test_sev_migrate_parameters(void)
{
	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_no_sev,
		*sev_es_vm_no_vmsa;
	int ret;

	vm_no_vcpu = vm_create_barebones();
	vm_no_sev = aux_vm_create(true);
	/* Migrating from a VM that never enabled SEV must be rejected. */
	ret = __sev_migrate_from(vm_no_vcpu, vm_no_sev);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Migrations require SEV enabled. ret %d, errno: %d", ret,
		    errno);

	/* The remaining cases need an SEV-ES VM; skip them without SEV-ES. */
	if (!have_sev_es)
		goto out;

	sev_vm = sev_vm_create(/* es= */ false);
	sev_es_vm = sev_vm_create(/* es= */ true);
	/* SEV-ES VM that was initialized but never LAUNCH_UPDATE_VMSA'd. */
	sev_es_vm_no_vmsa = vm_create_barebones();
	sev_es_vm_init(sev_es_vm_no_vmsa);
	__vm_vcpu_add(sev_es_vm_no_vmsa, 1);

	/* Destination already has SEV enabled: must fail. */
	ret = __sev_migrate_from(sev_vm, sev_es_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able migrate to SEV enabled VM. ret: %d, errno: %d",
		ret, errno);

	/* Destination already has SEV-ES enabled: must fail. */
	ret = __sev_migrate_from(sev_es_vm, sev_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d",
		ret, errno);

	/* SEV-ES source with a vCPU-count mismatch (dst has none): must fail. */
	ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d",
		ret, errno);

	/* SEV-ES source whose VMSAs were never encrypted: must fail. */
	ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm_no_vmsa);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d",
		ret, errno);

	kvm_vm_free(sev_vm);
	kvm_vm_free(sev_es_vm);
	kvm_vm_free(sev_es_vm_no_vmsa);
out:
	kvm_vm_free(vm_no_vcpu);
	kvm_vm_free(vm_no_sev);
}
194
/*
 * Ask KVM to copy (mirror) the SEV encryption context of @src into @dst
 * via KVM_CAP_VM_COPY_ENC_CONTEXT_FROM.  Returns the raw result (0 on
 * success, -1 with errno set on failure) so callers can test failures.
 */
static int __sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
	return __vm_enable_cap(dst, KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, src->fd);
}
199
200
/* Mirror the SEV context from @src into @dst, asserting the ioctl succeeds. */
static void sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
	int rc = __sev_mirror_create(dst, src);

	TEST_ASSERT(!rc, "Copying context failed, ret: %d, errno: %d", rc, errno);
}
208
verify_mirror_allowed_cmds(struct kvm_vm * vm)209 static void verify_mirror_allowed_cmds(struct kvm_vm *vm)
210 {
211 struct kvm_sev_guest_status status;
212 int cmd_id;
213
214 for (cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) {
215 int ret;
216
217 /*
218 * These commands are allowed for mirror VMs, all others are
219 * not.
220 */
221 switch (cmd_id) {
222 case KVM_SEV_LAUNCH_UPDATE_VMSA:
223 case KVM_SEV_GUEST_STATUS:
224 case KVM_SEV_DBG_DECRYPT:
225 case KVM_SEV_DBG_ENCRYPT:
226 continue;
227 default:
228 break;
229 }
230
231 /*
232 * These commands should be disallowed before the data
233 * parameter is examined so NULL is OK here.
234 */
235 ret = __vm_sev_ioctl(vm, cmd_id, NULL);
236 TEST_ASSERT(
237 ret == -1 && errno == EINVAL,
238 "Should not be able call command: %d. ret: %d, errno: %d",
239 cmd_id, ret, errno);
240 }
241
242 vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
243 }
244
/*
 * Create a mirror of an SEV(-ES) VM, verify the mirror can be brought all
 * the way up (vCPUs added, VMSA updated for ES), and check that it only
 * accepts the allowed subset of SEV commands.
 */
static void test_sev_mirror(bool es)
{
	struct kvm_vm *primary, *mirror;
	int vcpu_id;

	primary = sev_vm_create(es);
	mirror = aux_vm_create(false);

	sev_mirror_create(mirror, primary);

	/* Check that we can complete creation of the mirror VM.  */
	for (vcpu_id = 0; vcpu_id < NR_MIGRATE_TEST_VCPUS; vcpu_id++)
		__vm_vcpu_add(mirror, vcpu_id);

	if (es)
		vm_sev_ioctl(mirror, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);

	verify_mirror_allowed_cmds(mirror);

	kvm_vm_free(primary);
	kvm_vm_free(mirror);
}
267
/*
 * Negative tests for KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: every invalid
 * mirror source/destination combination must fail with -1/EINVAL.
 */
static void test_sev_mirror_parameters(void)
{
	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
	int ret;

	sev_vm = sev_vm_create(/* es= */ false);
	vm_with_vcpu = aux_vm_create(true);
	vm_no_vcpu = aux_vm_create(false);

	/* A VM cannot mirror its own encryption context. */
	ret = __sev_mirror_create(sev_vm, sev_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to self. ret: %d, errno: %d",
		ret, errno);

	/* Copying from a VM that never enabled SEV must be rejected. */
	ret = __sev_mirror_create(vm_no_vcpu, vm_with_vcpu);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Copy context requires SEV enabled. ret %d, errno: %d", ret,
		    errno);

	/* The destination must not have created any vCPUs yet. */
	ret = __sev_mirror_create(vm_with_vcpu, sev_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d",
		ret, errno);

	/* The remaining cases need an SEV-ES VM; skip them without SEV-ES. */
	if (!have_sev_es)
		goto out;

	sev_es_vm = sev_vm_create(/* es= */ true);
	/* Destination already has SEV enabled: must fail. */
	ret = __sev_mirror_create(sev_vm, sev_es_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to SEV enabled VM. ret: %d, errno: %d",
		ret, errno);

	/* Destination already has SEV-ES enabled: must fail. */
	ret = __sev_mirror_create(sev_es_vm, sev_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d",
		ret, errno);

	kvm_vm_free(sev_es_vm);

out:
	kvm_vm_free(sev_vm);
	kvm_vm_free(vm_with_vcpu);
	kvm_vm_free(vm_no_vcpu);
}
317
/*
 * Interleave mirror creation with migrations of both a primary SEV VM and
 * its mirror, chaining each through multiple destinations, then free
 * everything.  Repeated a second time with the mirrors destroyed before
 * the VMs they mirror, so both teardown orderings are exercised.
 */
static void test_sev_move_copy(void)
{
	struct kvm_vm *dst_vm, *dst2_vm, *dst3_vm, *sev_vm, *mirror_vm,
		*dst_mirror_vm, *dst2_mirror_vm, *dst3_mirror_vm;

	sev_vm = sev_vm_create(/* es= */ false);
	dst_vm = aux_vm_create(true);
	dst2_vm = aux_vm_create(true);
	dst3_vm = aux_vm_create(true);
	mirror_vm = aux_vm_create(false);
	dst_mirror_vm = aux_vm_create(false);
	dst2_mirror_vm = aux_vm_create(false);
	dst3_mirror_vm = aux_vm_create(false);

	sev_mirror_create(mirror_vm, sev_vm);

	/* Migrate the mirror's context first, then the primary's. */
	sev_migrate_from(dst_mirror_vm, mirror_vm);
	sev_migrate_from(dst_vm, sev_vm);

	sev_migrate_from(dst2_vm, dst_vm);
	sev_migrate_from(dst2_mirror_vm, dst_mirror_vm);

	sev_migrate_from(dst3_mirror_vm, dst2_mirror_vm);
	sev_migrate_from(dst3_vm, dst2_vm);

	/* First pass: free the primary chain before the mirror chain. */
	kvm_vm_free(dst_vm);
	kvm_vm_free(sev_vm);
	kvm_vm_free(dst2_vm);
	kvm_vm_free(dst3_vm);
	kvm_vm_free(mirror_vm);
	kvm_vm_free(dst_mirror_vm);
	kvm_vm_free(dst2_mirror_vm);
	kvm_vm_free(dst3_mirror_vm);

	/*
	 * Run a similar test, but destroy the mirrors before the mirrored
	 * VMs to ensure destruction is done safely.
	 */
	sev_vm = sev_vm_create(/* es= */ false);
	dst_vm = aux_vm_create(true);
	mirror_vm = aux_vm_create(false);
	dst_mirror_vm = aux_vm_create(false);

	sev_mirror_create(mirror_vm, sev_vm);

	sev_migrate_from(dst_mirror_vm, mirror_vm);
	sev_migrate_from(dst_vm, sev_vm);

	kvm_vm_free(mirror_vm);
	kvm_vm_free(dst_mirror_vm);
	kvm_vm_free(dst_vm);
	kvm_vm_free(sev_vm);
}
371
main(int argc,char * argv[])372 int main(int argc, char *argv[])
373 {
374 TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM));
375 TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM));
376
377 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));
378
379 have_sev_es = kvm_cpu_has(X86_FEATURE_SEV_ES);
380
381 if (kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
382 test_sev_migrate_from(/* es= */ false);
383 if (have_sev_es)
384 test_sev_migrate_from(/* es= */ true);
385 test_sev_migrate_locking();
386 test_sev_migrate_parameters();
387 if (kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
388 test_sev_move_copy();
389 }
390 if (kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
391 test_sev_mirror(/* es= */ false);
392 if (have_sev_es)
393 test_sev_mirror(/* es= */ true);
394 test_sev_mirror_parameters();
395 }
396 return 0;
397 }
398