xref: /linux/arch/arm64/kvm/hyp/nvhe/setup.c (revision 43db1111073049220381944af4a3b8a5400eda71)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *selftest_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

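/*
 * Carve the memory pool donated by the host into the contiguous regions
 * the hypervisor needs: selftest pages, the hyp vmemmap, the protected VM
 * table, the hyp stage-1 page-tables, the host stage-2 page-tables and the
 * FF-A proxy buffers. Allocations are served linearly by the early
 * allocator; the stage-1 page-table region is later re-used to seed the
 * buddy allocator in __pkvm_init_finalise().
 */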
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = pkvm_selftest_pages();
	selftest_base = hyp_early_alloc_contig(nr_pages);
	if (nr_pages && !selftest_base)
		return -ENOMEM;

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	nr_pages = hyp_ffa_proxy_pages();
	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
	if (!ffa_proxy_pages)
		return -ENOMEM;

	return 0;
}

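/*
 * Map each CPU's host SVE state buffer into the hyp stage-1 so the
 * hypervisor can access it when saving/restoring the host's vector state.
 * A no-op on systems without SVE.
 */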
static int pkvm_create_host_sve_mappings(void)
{
	void *start, *end;
	int ret, i;

	if (!system_supports_sve())
		return 0;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
		struct cpu_sve_state *sve_state = host_data->sve_state;

		start = kern_hyp_va(sve_state);
		end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;
	}

	return 0;
}

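/*
 * Rebuild the hyp stage-1 page-table from scratch, this time backed by the
 * page-table pages reserved in divide_memory_pool(): the idmap, the
 * vectors, the vmemmap backing, the hypervisor's own text/data/rodata/bss
 * sections, the donated memory pool itself, and each CPU's per-CPU area
 * and stack.
 */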
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_data_start, __hyp_data_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
		if (ret)
			return ret;
	}

	return pkvm_create_host_sve_mappings();
}

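/*
 * Point every CPU's init parameters at the new PGD, then clean+invalidate
 * the structure to the PoC as secondary CPUs may read it while their MMU
 * and caches are still off.
 */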
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				       (unsigned long)params + sizeof(*params));
	}
}

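/* Thin wrappers adapting the hyp buddy allocator to kvm_pgtable_mm_ops. */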
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

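/*
 * Leaf walker run over the hyp stage-1 once the hypervisor is in charge:
 * the pkvm page state is recovered from the software bits of each valid
 * PTE and mirrored into the hyp_vmemmap and the host stage-2. For
 * instance, a page the hypervisor merely borrows (PKVM_PAGE_SHARED_BORROWED
 * in the hyp stage-1) is recorded as PKVM_PAGE_SHARED_OWNED on the host
 * side, and vice versa.
 */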
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
				     enum kvm_pgtable_walk_flags visit)
{
	enum pkvm_page_state state;
	struct hyp_page *page;
	phys_addr_t phys;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	if (ctx->level != KVM_PGTABLE_LAST_LEVEL)
		return -EINVAL;

	phys = kvm_pte_to_phys(ctx->old);
	if (!addr_is_memory(phys))
		return -EINVAL;

	page = hyp_phys_to_page(phys);

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1, and make sure to propagate them
	 * to the hyp_vmemmap state.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
	switch (state) {
	case PKVM_PAGE_OWNED:
		set_hyp_state(page, PKVM_PAGE_OWNED);
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	case PKVM_PAGE_SHARED_OWNED:
		set_hyp_state(page, PKVM_PAGE_SHARED_OWNED);
		set_host_state(page, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		set_hyp_state(page, PKVM_PAGE_SHARED_BORROWED);
		set_host_state(page, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

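/*
 * Visited for every valid PTE, both leaf entries and (post-order) table
 * entries, so each page-table page ends up with one reference per valid
 * entry it contains, on top of the initial refcount of '1' set by the
 * buddy allocator.
 */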
static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}

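/*
 * Walk the hyp stage-1 mapping of every memblock region and fix up the
 * ownership state of each page of memory.
 */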
static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_host_ownership_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

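/*
 * Walk the whole hyp VA range, rather than just the memblock regions, so
 * that every page-table page gets its refcount fixed up.
 */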
static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_hyp_pgtable_refcnt_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}

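/*
 * Second stage of hypervisor init, entered via __pkvm_init_switch_pgd()
 * with the new stage-1 page-table live: install the buddy allocator,
 * prepare the host stage-2, fix up ownership and refcounts, and hand the
 * return value back to the host through the saved host context.
 */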
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_fixmap();
	if (ret)
		goto out;

	ret = hyp_ffa_init(ffa_proxy_pages);
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);

	pkvm_ownership_selftest(selftest_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

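/*
 * Hypervisor entry point for pKVM initialisation, reached via the
 * __pkvm_init hypercall. On success it never returns through here: the
 * idmap'd __pkvm_init_switch_pgd() installs the new page-table and
 * tail-calls __pkvm_init_finalise(), which returns to the host via
 * __host_enter().
 *
 * A rough sketch of the host-side caller (see do_pkvm_init() in
 * arch/arm64/kvm/arm.c; the exact arguments may differ between kernel
 * versions):
 *
 *	ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
 *				num_possible_cpus(), kern_hyp_va(per_cpu_base),
 *				hyp_va_bits);
 */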
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	typeof(__pkvm_init_switch_pgd) *fn;
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);

	unreachable();
}
369