// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <stdint.h>
#include <stdbool.h>

#include "sev.h"

/*
 * sparsebit_next_clear() can return 0 if [x, 2**64-1] are all set, and the
 * -1 would then cause an underflow back to 2**64 - 1. This is expected and
 * correct.
 *
 * If the last range in the sparsebit is [x, y] and we try to iterate,
 * sparsebit_next_set() will return 0, and sparsebit_next_clear() will try
 * to find the first range, but that's correct because the condition
 * expression would cause us to quit the loop.
 */
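/*
 * Encrypt one userspace memory region: register the region's backing memory
 * with SEV, then issue a KVM_SEV_LAUNCH_UPDATE_DATA for each contiguous
 * range of protected pages to encrypt (and measure) it in place.
 */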
static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region)
{
	const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
	const vm_paddr_t gpa_base = region->region.guest_phys_addr;
	const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
	sparsebit_idx_t i, j;

	if (!sparsebit_any_set(protected_phy_pages))
		return;

	sev_register_encrypted_memory(vm, region);

	sparsebit_for_each_set_range(protected_phy_pages, i, j) {
		const uint64_t size = (j - i + 1) * vm->page_size;
		const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;

		sev_launch_update_data(vm, gpa_base + offset, size);
	}
}

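/*
 * Run the SEV launch flow up through encryption of guest memory:
 * KVM_SEV_LAUNCH_START binds the policy and puts the guest into
 * LAUNCH_UPDATE state, every protected region is then encrypted in place,
 * and for SEV-ES guests the vCPU state (VMSA) is encrypted as well via
 * KVM_SEV_LAUNCH_UPDATE_VMSA.
 */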
void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
{
	struct kvm_sev_launch_start launch_start = {
		.policy = policy,
	};
	struct userspace_mem_region *region;
	struct kvm_sev_guest_status status;
	int ctr;

	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start);
	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);

	TEST_ASSERT_EQ(status.policy, policy);
	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE);

	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
		encrypt_region(vm, region);

	if (policy & SEV_POLICY_ES)
		vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);

	/*
	 * The guest's page tables were just encrypted along with the rest of
	 * guest memory, so the host can no longer read or modify them.
	 */
	vm->arch.is_pt_protected = true;
}

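/*
 * Fetch the launch measurement computed by the PSP firmware over the
 * encrypted guest image.  KVM_SEV_LAUNCH_MEASURE also transitions the guest
 * from LAUNCH_UPDATE to LAUNCH_SECRET state, which is asserted below.
 */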
void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
{
	struct kvm_sev_launch_measure launch_measure;
	struct kvm_sev_guest_status guest_status;

	/* 256 bytes comfortably exceeds the size of any SEV measurement. */
	launch_measure.len = 256;
	launch_measure.uaddr = (__u64)measurement;
	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure);

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &guest_status);
	TEST_ASSERT_EQ(guest_status.state, SEV_GUEST_STATE_LAUNCH_SECRET);
}

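/*
 * Finalize the launch sequence.  The guest must still be in one of the two
 * launch states; KVM_SEV_LAUNCH_FINISH then transitions it to RUNNING, after
 * which no further launch updates are accepted.
 */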
void sev_vm_launch_finish(struct kvm_vm *vm)
{
	struct kvm_sev_guest_status status;

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
	TEST_ASSERT(status.state == SEV_GUEST_STATE_LAUNCH_UPDATE ||
		    status.state == SEV_GUEST_STATE_LAUNCH_SECRET,
		    "Unexpected guest state: %d", status.state);

	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL);

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
}

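/*
 * Create a single-vCPU SEV or SEV-ES VM, selecting the subtype based on the
 * requested policy, and take it through the full launch sequence so the
 * returned VM is ready to run.
 */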
struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t policy, void *guest_code,
					   struct kvm_vcpu **cpu)
{
	struct vm_shape shape = {
		.type = VM_TYPE_DEFAULT,
		.mode = VM_MODE_DEFAULT,
		.subtype = policy & SEV_POLICY_ES ? VM_SUBTYPE_SEV_ES :
						    VM_SUBTYPE_SEV,
	};
	struct kvm_vm *vm;
	struct kvm_vcpu *cpus[1];
	uint8_t measurement[512];

	vm = __vm_create_with_vcpus(shape, 1, 0, guest_code, cpus);
	*cpu = cpus[0];

	sev_vm_launch(vm, policy);

	/* TODO: Validate the measurement is as expected. */
	sev_vm_launch_measure(vm, measurement);

	sev_vm_launch_finish(vm);

	return vm;
}
115