// SPDX-License-Identifier: GPL-2.0-only
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include <alloca.h>

#include "sev.h"
/*
 * sparsebit_next_clear() can return 0 if [x, 2**64-1] are all set, and the
 * -1 would then cause an underflow back to 2**64 - 1. This is expected and
 * correct.
 *
 * If the last range in the sparsebit is [x, y] and we try to iterate,
 * sparsebit_next_set() will return 0, and sparsebit_next_clear() will try
 * to find the first range, but that's correct because the condition
 * expression would cause us to quit the loop.
 */

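/*
 * Illustrative example (assuming the iterator derives a range's end as
 * sparsebit_next_clear() - 1, per the "-1" described above): if the last
 * set range is [x, 2**64-1], sparsebit_next_clear() returns 0 and the -1
 * wraps back around to 2**64-1, which is exactly the end of that range.
 */

/*
 * Register the region with the SEV firmware, then issue LAUNCH_UPDATE_DATA
 * to encrypt (and measure) each contiguous run of protected guest pages.
 */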
static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region)
{
	const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
	const vm_paddr_t gpa_base = region->region.guest_phys_addr;
	const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
	sparsebit_idx_t i, j;

	if (!sparsebit_any_set(protected_phy_pages))
		return;

	sev_register_encrypted_memory(vm, region);

	sparsebit_for_each_set_range(protected_phy_pages, i, j) {
		const uint64_t size = (j - i + 1) * vm->page_size;
		const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;

		sev_launch_update_data(vm, gpa_base + offset, size);
	}
}

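/*
 * Initialize SEV on @vm: the legacy KVM_SEV_INIT ioctl for the default VM
 * type, KVM_SEV_INIT2 (with an empty kvm_sev_init) for the dedicated SEV
 * VM type.
 */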
void sev_vm_init(struct kvm_vm *vm)
{
	if (vm->type == KVM_X86_DEFAULT_VM) {
		assert(vm->arch.sev_fd == -1);
		vm->arch.sev_fd = open_sev_dev_path_or_exit();
		vm_sev_ioctl(vm, KVM_SEV_INIT, NULL);
	} else {
		struct kvm_sev_init init = { 0 };
		assert(vm->type == KVM_X86_SEV_VM);
		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
	}
}

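/*
 * Initialize SEV-ES on @vm: the legacy KVM_SEV_ES_INIT ioctl for the
 * default VM type, KVM_SEV_INIT2 for the dedicated SEV-ES VM type.
 */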
void sev_es_vm_init(struct kvm_vm *vm)
{
	if (vm->type == KVM_X86_DEFAULT_VM) {
		assert(vm->arch.sev_fd == -1);
		vm->arch.sev_fd = open_sev_dev_path_or_exit();
		vm_sev_ioctl(vm, KVM_SEV_ES_INIT, NULL);
	} else {
		struct kvm_sev_init init = { 0 };
		assert(vm->type == KVM_X86_SEV_ES_VM);
		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
	}
}

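/*
 * Begin the launch flow for @vm: start a launch session with @policy,
 * encrypt all protected memory regions, and, for SEV-ES policies, encrypt
 * and measure the vCPU state (VMSA) as well. The page tables are encrypted
 * too, so flag them as inaccessible to the host.
 */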
void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
{
	struct kvm_sev_launch_start launch_start = {
		.policy = policy,
	};
	struct userspace_mem_region *region;
	struct kvm_sev_guest_status status;
	int ctr;

	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start);
	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);

	TEST_ASSERT_EQ(status.policy, policy);
	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE);

	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
		encrypt_region(vm, region);

	if (policy & SEV_POLICY_ES)
		vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);

	vm->arch.is_pt_protected = true;
}

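/*
 * Retrieve the launch measurement into @measurement, which must be able to
 * hold at least 256 bytes. On return the guest must have advanced to the
 * LAUNCH_SECRET state.
 */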
void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
{
	struct kvm_sev_launch_measure launch_measure;
	struct kvm_sev_guest_status guest_status;

	launch_measure.len = 256;
	launch_measure.uaddr = (__u64)measurement;
	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure);

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &guest_status);
	TEST_ASSERT_EQ(guest_status.state, SEV_GUEST_STATE_LAUNCH_SECRET);
}

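/*
 * Finalize the launch: verify the guest is still in a launch state, issue
 * LAUNCH_FINISH, and verify the guest has transitioned to RUNNING.
 */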
void sev_vm_launch_finish(struct kvm_vm *vm)
{
	struct kvm_sev_guest_status status;

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
	TEST_ASSERT(status.state == SEV_GUEST_STATE_LAUNCH_UPDATE ||
		    status.state == SEV_GUEST_STATE_LAUNCH_SECRET,
		    "Unexpected guest state: %d", status.state);

	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL);

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
}

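/*
 * Create a VM of the given SEV @type with a single vCPU running
 * @guest_code; the vCPU is returned via @cpu.
 */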
struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
					   struct kvm_vcpu **cpu)
{
	struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};
	struct kvm_vm *vm;
	struct kvm_vcpu *cpus[1];

	vm = __vm_create_with_vcpus(shape, 1, 0, guest_code, cpus);
	*cpu = cpus[0];

	return vm;
}

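/*
 * Convenience wrapper that performs the full launch sequence: LAUNCH_START,
 * measurement retrieval, and LAUNCH_FINISH. If @measurement is NULL, a
 * throwaway 256-byte stack buffer receives the measurement.
 */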
void vm_sev_launch(struct kvm_vm *vm, uint32_t policy, uint8_t *measurement)
{
	sev_vm_launch(vm, policy);

	if (!measurement)
		measurement = alloca(256);

	sev_vm_launch_measure(vm, measurement);

	sev_vm_launch_finish(vm);
}

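/*
 * Illustrative usage sketch (not part of the original file): a minimal SEV
 * test built on these helpers might look like the following, assuming a
 * guest_code() function defined elsewhere in the test.
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm;
 *
 *	vm = vm_sev_create_with_one_vcpu(KVM_X86_SEV_VM, guest_code, &vcpu);
 *	vm_sev_launch(vm, SEV_POLICY_NO_DBG, NULL);
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */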