xref: /linux/arch/arm64/kvm/hyp/nvhe/setup.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

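/*
 * Bases of the carve-outs taken from the host-donated memory pool, set up
 * by divide_memory_pool() below.
 */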
static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

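/*
 * Carve the memory pool donated by the host into three contiguous
 * chunks: the hyp vmemmap, the hyp stage-1 page-table and the host
 * stage-2 page-table.
 */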
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long vstart, vend, nr_pages;

	hyp_early_alloc_init(virt, size);

	hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
	nr_pages = (vend - vstart) >> PAGE_SHIFT;
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	return 0;
}

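/*
 * Rebuild the hypervisor stage-1 page-table from scratch with the early
 * allocator, mapping the idmap, the vectors, the vmemmap backing, the hyp
 * text/rodata/bss sections, the donated pool and each CPU's per-cpu area
 * and stack. The new table only becomes live once __pkvm_init_switch_pgd()
 * installs it.
 */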
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

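	/*
	 * Map each CPU's per-cpu region, plus the single page backing its
	 * hyp stack (stack_hyp_va being the exclusive top of that page).
	 */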
	for (i = 0; i < hyp_nr_cpus; i++) {
		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va;
		start = end - PAGE_SIZE;
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;
	}

	/*
	 * Map the host's .bss and .rodata sections RO in the hypervisor, but
	 * transfer the ownership from the host to the hypervisor itself to
	 * make sure it can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see finalize_host_mappings()) once
	 * the hyp_vmemmap is addressable.
	 */
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(__start_rodata, __end_rodata, prot);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, prot);
	if (ret)
		return ret;

	return 0;
}

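/*
 * Point each CPU's boot parameters at the new stage-1 PGD and clean the
 * update to the PoC, since the parameters are consumed with the MMU off.
 */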
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				       (unsigned long)params + sizeof(*params));
	}
}

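/* Adapters exposing the hyp buddy allocator ('hpool') as kvm_pgtable_mm_ops. */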
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

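/*
 * Page-table walker callback: take a reference on each visited page-table
 * page (see the refcount fix-up comment below), then mirror the hyp
 * stage-1 ownership state of every valid leaf into the host stage-2,
 * either by changing the page's owner or by annotating its identity
 * mapping with the matching shared state.
 */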
static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
					 kvm_pte_t *ptep,
					 enum kvm_pgtable_walk_flags flag,
					 void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	kvm_pte_t pte = *ptep;
	phys_addr_t phys;

	if (!kvm_pte_valid(pte))
		return 0;

	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	mm_ops->get_page(ptep);
	if (flag != KVM_PGTABLE_WALK_LEAF)
		return 0;

	if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	phys = kvm_pte_to_phys(pte);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

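/*
 * Walk the whole hyp stage-1, one memblock at a time, so that the walker
 * above can fix up page-table refcounts and host stage-2 annotations now
 * that the hyp_vmemmap is addressable.
 */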
static int finalize_host_mappings(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= finalize_host_mappings_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

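/*
 * Second half of pKVM initialisation, tail-called from handle___pkvm_init()
 * once the new stage-1 page-table is live: set up the hyp buddy allocator
 * on the now-backed vmemmap, prepare the host stage-2, switch the hyp
 * page-table over to the buddy-backed mm_ops and finalize the host
 * mappings, then return to the host with the result in x1.
 */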
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
	struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

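	/*
	 * From now on, hyp stage-1 page-table pages are allocated and
	 * refcounted through the buddy allocator rather than the early
	 * allocator.
	 */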
	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = finalize_host_mappings();
	if (ret)
		goto out;

out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

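/*
 * Entry point of the pKVM initialisation sequence: carve up the memory
 * pool donated by the host, rebuild the hyp stage-1 mappings, then jump
 * through the idmap to switch page-tables, tail-calling
 * __pkvm_init_finalise() above. Returns only on error; on success,
 * control continues in __pkvm_init_finalise().
 */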
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(__hyp_pa(params), __pkvm_init_finalise);

	unreachable();
}