xref: /linux/arch/arm64/kvm/hyp/nvhe/setup.c (revision 79d2e1919a2728ef49d938eb20ebd5903c14dfb0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

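/*
 * Carve the page-aligned memory donated by the host into the contiguous
 * regions the hypervisor needs: the hyp vmemmap, the VM table, the hyp
 * stage-1 page-table, the host stage-2 page-table and the FF-A proxy
 * buffers. The hyp_*_pages() helpers presumably mirror the sizing the
 * host used when reserving the pool, so the donation is expected to be
 * large enough for all five regions.
 */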
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	nr_pages = hyp_ffa_proxy_pages();
	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
	if (!ffa_proxy_pages)
		return -ENOMEM;

	return 0;
}

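/*
 * Map each CPU's host SVE state buffer into the hyp stage-1 so that EL2
 * can save/restore the host's vector state. No-op when the system does
 * not support SVE.
 */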
static int pkvm_create_host_sve_mappings(void)
{
	void *start, *end;
	int ret, i;

	if (!system_supports_sve())
		return 0;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
		struct cpu_sve_state *sve_state = host_data->sve_state;

		start = kern_hyp_va(sve_state);
		end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;
	}

	return 0;
}

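/*
 * Rebuild the hyp stage-1 page-table from scratch, this time backed by
 * the pages carved out in divide_memory_pool(): idmap, vectors, vmemmap
 * backing, the hyp text/rodata/bss sections, the donated pool itself,
 * and the per-CPU areas and stacks.
 */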
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
		if (ret)
			return ret;
	}

	return pkvm_create_host_sve_mappings();
}

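/*
 * Point every CPU's boot parameters at the new PGD. The updated structs
 * are cleaned and invalidated to the PoC as they are presumably consumed
 * with the MMU and caches off during (re-)initialisation of EL2.
 */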
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				       (unsigned long)params + sizeof(*params));
	}
}

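/*
 * Thin wrappers exposing the hyp buddy allocator ('hpool') through the
 * struct kvm_pgtable_mm_ops interface installed in __pkvm_init_finalise().
 */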
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

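/*
 * Leaf walker: for every valid last-level hyp stage-1 mapping, derive
 * the pKVM page state encoded in the PTE's software bits and mirror it
 * into the host stage-2, either transferring ownership of the page to
 * the hypervisor or marking it with the complementary SHARED_* state.
 */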
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
				     enum kvm_pgtable_walk_flags visit)
{
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	phys_addr_t phys;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	if (ctx->level != KVM_PGTABLE_LAST_LEVEL)
		return -EINVAL;

	phys = kvm_pte_to_phys(ctx->old);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

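/*
 * Walker invoked for both leaf entries and post-order table visits; each
 * valid entry elevates the refcount of the page-table page holding it.
 */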
static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}

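/*
 * Apply fix_host_ownership_walker() across the hyp stage-1 mappings of
 * every memory region known to the hypervisor.
 */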
static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_host_ownership_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

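/*
 * Walk the entire hyp VA space and take a reference for every valid
 * entry, bringing the vmemmap refcounts of the early-allocated table
 * pages in line with what the buddy allocator expects.
 */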
static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_hyp_pgtable_refcnt_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}

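/*
 * Second stage of pKVM setup, entered from __pkvm_init_switch_pgd() once
 * the new stage-1 page-table is live: install the buddy allocator over
 * the vmemmap, prepare the host stage-2, switch the page-table ops to
 * the pool-backed versions, then fix up ownership and refcounts before
 * returning to the host. Never returns to its caller.
 */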
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_pcpu_fixmap();
	if (ret)
		goto out;

	ret = hyp_ffa_init(ffa_proxy_pages);
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

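/*
 * Entry point for pKVM initialisation, reached from the __pkvm_init
 * hypercall with a page-aligned range of memory donated by the host.
 * After carving up the donation and rebuilding the hyp mappings, this
 * jumps through the idmap to switch PGDs; the switch code tail-calls
 * __pkvm_init_finalise(), so this function never returns.
 */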
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	typeof(__pkvm_init_switch_pgd) *fn;
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);

	unreachable();
}