xref: /linux/arch/arm64/kvm/pkvm.c (revision 60684c2bd35064043360e6f716d1b7c20e967b7d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/sort.h>

#include <asm/kvm_pkvm.h>

#include "hyp_constants.h"

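/*
 * Host aliases of the hyp copies of the memblock array and its element
 * count, filled in at boot by register_memblock_regions() below.
 */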
static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);

phys_addr_t hyp_mem_base;
phys_addr_t hyp_mem_size;

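/* Compare two memblock regions by base address, for use with sort(). */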
static int cmp_hyp_memblock(const void *p1, const void *p2)
{
	const struct memblock_region *r1 = p1;
	const struct memblock_region *r2 = p2;

	return r1->base < r2->base ? -1 : (r1->base > r2->base);
}

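/* Sort the hyp memblock copies in place by ascending base address. */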
static void __init sort_memblock_regions(void)
{
	sort(hyp_memory,
	     *hyp_memblock_nr_ptr,
	     sizeof(struct memblock_region),
	     cmp_hyp_memblock,
	     NULL);
}

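/*
 * Copy the kernel's memblock regions into the hyp-owned array so the
 * hypervisor knows the layout of memory, failing if there are more regions
 * than HYP_MEMBLOCK_REGIONS slots.
 */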
static int __init register_memblock_regions(void)
{
	struct memblock_region *reg;

	for_each_mem_region(reg) {
		if (*hyp_memblock_nr_ptr >= HYP_MEMBLOCK_REGIONS)
			return -ENOMEM;

		hyp_memory[*hyp_memblock_nr_ptr] = *reg;
		(*hyp_memblock_nr_ptr)++;
	}
	sort_memblock_regions();

	return 0;
}

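/*
 * Reserve the memory the nVHE hypervisor will need for its own data
 * structures. This runs early in boot, before the page allocator is up,
 * hence the memblock allocation.
 */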
void __init kvm_hyp_reserve(void)
{
	u64 hyp_mem_pages = 0;
	int ret;

	if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
		return;

	if (kvm_get_mode() != KVM_MODE_PROTECTED)
		return;

	ret = register_memblock_regions();
	if (ret) {
		*hyp_memblock_nr_ptr = 0;
		kvm_err("Failed to register hyp memblocks: %d\n", ret);
		return;
	}

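	/*
	 * Account for the pages the hypervisor needs: its own stage-1
	 * page-table, the host stage-2 page-table, the VM handle table and
	 * the hyp vmemmap backing the struct hyp_page array.
	 */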
	hyp_mem_pages += hyp_s1_pgtable_pages();
	hyp_mem_pages += host_s2_pgtable_pages();
	hyp_mem_pages += hyp_vm_table_pages();
	hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);

	/*
	 * Try to allocate a PMD-aligned region to reduce TLB pressure once
	 * this is unmapped from the host stage-2, and fall back to PAGE_SIZE
	 * alignment if that fails.
	 */
	hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
	hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
					   PMD_SIZE);
	if (!hyp_mem_base)
		hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE);
	else
		hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);

	if (!hyp_mem_base) {
		kvm_err("Failed to reserve hyp memory\n");
		return;
	}

	kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
		 hyp_mem_base);
}

/*
 * Allocates and donates memory for hypervisor VM structs at EL2.
 *
 * Allocates space for the VM state, which includes the hyp vm as well as
 * the hyp vcpus.
 *
 * Stores an opaque handle in the kvm struct for future reference.
 *
 * Return 0 on success, negative error code on failure.
 */
static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
{
	size_t pgd_sz, hyp_vm_sz, hyp_vcpu_sz;
	struct kvm_vcpu *host_vcpu;
	pkvm_handle_t handle;
	void *pgd, *hyp_vm;
	unsigned long idx;
	int ret;

	if (host_kvm->created_vcpus < 1)
		return -EINVAL;

	pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);

	/*
	 * The PGD pages will be reclaimed using a hyp_memcache, which implies
	 * page granularity. So, use alloc_pages_exact() to get individual
	 * refcounts.
	 */
	pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT);
	if (!pgd)
		return -ENOMEM;

	/*
	 * Allocate memory to donate to hyp for the VM struct and its vcpu
	 * pointers.
	 */
	hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
					size_mul(sizeof(void *),
						 host_kvm->created_vcpus)));
	hyp_vm = alloc_pages_exact(hyp_vm_sz, GFP_KERNEL_ACCOUNT);
	if (!hyp_vm) {
		ret = -ENOMEM;
		goto free_pgd;
	}

	/* Donate the VM memory to hyp and let hyp initialize it. */
	ret = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, hyp_vm, pgd);
	if (ret < 0)
		goto free_vm;

	handle = ret;

	host_kvm->arch.pkvm.handle = handle;

	/* Donate memory for the vcpus at hyp and initialize them. */
	hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
	kvm_for_each_vcpu(idx, host_vcpu, host_kvm) {
		void *hyp_vcpu;

		/* The vcpus must be indexed sequentially, starting at 0. */
		if (WARN_ON(host_vcpu->vcpu_idx != idx)) {
			ret = -EINVAL;
			goto destroy_vm;
		}

		hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT);
		if (!hyp_vcpu) {
			ret = -ENOMEM;
			goto destroy_vm;
		}

		ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
					hyp_vcpu);
		if (ret) {
			free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
			goto destroy_vm;
		}
	}

	return 0;

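/*
 * Once __pkvm_init_vm() has succeeded, the VM and PGD pages are owned by hyp,
 * so later failures must tear the VM down at hyp rather than free the donated
 * pages directly.
 */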
destroy_vm:
	pkvm_destroy_hyp_vm(host_kvm);
	return ret;
free_vm:
	free_pages_exact(hyp_vm, hyp_vm_sz);
free_pgd:
	free_pages_exact(pgd, pgd_sz);
	return ret;
}

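/*
 * Create the hyp view of this VM if it does not already exist, serialized
 * against concurrent callers by the kvm lock.
 */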
int pkvm_create_hyp_vm(struct kvm *host_kvm)
{
	int ret = 0;

	mutex_lock(&host_kvm->lock);
	if (!host_kvm->arch.pkvm.handle)
		ret = __pkvm_create_hyp_vm(host_kvm);
	mutex_unlock(&host_kvm->lock);

	return ret;
}

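/*
 * Tear down the hyp VM, if any, and reclaim the pages that were donated to
 * hyp via the teardown memcache.
 */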
void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
{
	if (host_kvm->arch.pkvm.handle) {
		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
					  host_kvm->arch.pkvm.handle));
	}

	host_kvm->arch.pkvm.handle = 0;
	free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
}

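/* Host-side init of a protected VM: only the lock needs setting up here. */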
int pkvm_init_host_vm(struct kvm *host_kvm)
{
	mutex_init(&host_kvm->lock);
	return 0;
}