xref: /linux/arch/arm64/include/asm/kvm_pgtable.h (revision 11e7861d680c3757eab18ec0a474ff680e007dc4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_FIRST_LEVEL		-1
#define KVM_PGTABLE_LAST_LEVEL		3

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2
#endif

#define kvm_lpa2_is_enabled()		system_supports_lpa2()

static inline u64 kvm_get_parange_max(void)
{
	if (kvm_lpa2_is_enabled() ||
	   (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
		return ID_AA64MMFR0_EL1_PARANGE_52;
	else
		return ID_AA64MMFR0_EL1_PARANGE_48;
}

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange_max = kvm_get_parange_max();
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	if (parange > parange_max)
		parange = parange_max;

	return parange;
}
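
/*
 * Usage sketch (not part of the original header): deriving the host
 * physical-address size in bits from the clamped PARANGE value. This
 * assumes id_aa64mmfr0_parange_to_phys_shift() from <asm/cpufeature.h>
 * is visible here, as it is on current kernels.
 */
static inline u32 kvm_example_pa_bits(u64 mmfr0)
{
	/* Clamp PARANGE to what KVM supports, then convert to a bit count. */
	return id_aa64mmfr0_parange_to_phys_shift(kvm_get_parange(mmfr0));
}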

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)
#define KVM_PTE_ADDR_MASK_LPA2		GENMASK(49, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_50_LPA2		GENMASK(9, 8)

#define KVM_PHYS_INVALID		(-1ULL)

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 50)

#define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S1_GP	BIT(50)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)
#define KVM_MAX_OWNER_ID		1

/*
 * Used to indicate a pte for which a 'break-before-make' sequence is in
 * progress.
 */
#define KVM_INVALID_PTE_LOCKED		BIT(10)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa;

	if (kvm_lpa2_is_enabled()) {
		pa = pte & KVM_PTE_ADDR_MASK_LPA2;
		pa |= FIELD_GET(KVM_PTE_ADDR_51_50_LPA2, pte) << 50;
	} else {
		pa = pte & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16)
			pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
	}

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte;

	if (kvm_lpa2_is_enabled()) {
		pte = pa & KVM_PTE_ADDR_MASK_LPA2;
		pa &= GENMASK(51, 50);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_50_LPA2, pa >> 50);
	} else {
		pte = pa & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16) {
			pa &= GENMASK(51, 48);
			pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
		}
	}

	return pte;
}
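
/*
 * Illustrative sketch (not part of the original header): for a
 * page-aligned PA within the supported range, kvm_phys_to_pte() and
 * kvm_pte_to_phys() round-trip the address bits, which a debug build
 * could check as below.
 */
static inline void kvm_example_check_pte_roundtrip(u64 pa)
{
	kvm_pte_t pte = kvm_phys_to_pte(pa & PAGE_MASK);

	WARN_ON(kvm_pte_to_phys(pte) != (pa & PAGE_MASK));
}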

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}

static inline u64 kvm_granule_shift(s8 level)
{
	/* Assumes KVM_PGTABLE_LAST_LEVEL is 3 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(s8 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(s8 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	s8 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}

static inline bool kvm_is_block_size_supported(u64 size)
{
	/* IS_ALIGNED(size, size) expands to !(size & (size - 1)). */
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}
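
/*
 * Illustrative sketch (not part of the original header): with 4K pages,
 * kvm_supported_block_sizes() is BIT(30) | BIT(21) | BIT(12), i.e. 1GB,
 * 2MB and 4K, so a 2MB mapping can be installed as a single block:
 */
static inline bool kvm_example_can_use_2mb_block(void)
{
	return kvm_is_block_size_supported(BIT(21));	/* 2MB */
}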

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address	mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the	specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_unlinked_table)(void *addr, s8 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};
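
/*
 * Usage sketch (not part of the original header): a caller populates a
 * struct kvm_pgtable_mm_ops before passing it to one of the init
 * functions below. The callback names here are hypothetical stand-ins
 * for caller-provided implementations:
 *
 *	static struct kvm_pgtable_mm_ops my_mm_ops = {
 *		.zalloc_page	= my_zalloc_page,
 *		.get_page	= my_get_page,
 *		.put_page	= my_put_page,
 *		.page_count	= my_page_count,
 *		.phys_to_virt	= my_phys_to_virt,
 *		.virt_to_phys	= my_virt_to_phys,
 *	};
 */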

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_NORMAL_NC:	Normal noncacheable attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),
	KVM_PGTABLE_PROT_NORMAL_NC		= BIT(4),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without performing break-before-make's
 *					TLB invalidation.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
 *					without performing the required cache
 *					maintenance operations.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	u64					addr;
	u64					end;
	s8					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
{
	return rcu_dereference_raw(pteref);
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	union {
		struct rb_root_cached				pkvm_mappings;
		struct {
			u32					ia_bits;
			s8					start_level;
			kvm_pteref_t				pgd;
			struct kvm_pgtable_mm_ops		*mm_ops;

			/* Stage-2 only */
			enum kvm_pgtable_stage2_flags		flags;
			kvm_pgtable_force_pte_cb_t		force_pte_cb;
		};
	};
	struct kvm_s2_mmu					*mmu;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
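
/*
 * Usage sketch (not part of the original header): the expected stage-1
 * lifecycle, with my_mm_ops standing in for a caller-provided
 * struct kvm_pgtable_mm_ops and va/pa for a page to map:
 *
 *	struct kvm_pgtable pgt;
 *
 *	if (!kvm_pgtable_hyp_init(&pgt, hyp_va_bits, &my_mm_ops)) {
 *		kvm_pgtable_hyp_map(&pgt, va, PAGE_SIZE, pa, PAGE_HYP);
 *		kvm_pgtable_hyp_unmap(&pgt, va, PAGE_SIZE);
 *		kvm_pgtable_hyp_destroy(&pgt);
 *	}
 */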

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. HA Flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
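
/*
 * Usage sketch (not part of the original header): building VTCR_EL2
 * from the system-wide sanitised feature registers. This assumes
 * read_sanitised_ftr_reg() from <asm/cpufeature.h> is visible here.
 */
static inline u64 kvm_example_vtcr(u32 phys_shift)
{
	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	u64 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
}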

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
					  struct kvm_pgtable_mm_ops *mm_ops)
{
	return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);
}

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address of the range to destroy.
 * @size:	Size of the range.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
					u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * It is assumed that the rest of the page-table is freed before this operation.
 */
void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);

/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:	Force mappings to PAGE_SIZE granularity.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and is
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, s8 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it would recreate the exact same mapping or only change the access
 * permissions. Instead, the vCPU will exit one more time from the guest if
 * still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
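
/*
 * Usage sketch (not part of the original header): installing a 2MB
 * block mapping at IPA 'ipa' backed by PA 'pa', with 'cache' a
 * topped-up struct kvm_mmu_memory_cache (all names illustrative):
 *
 *	ret = kvm_pgtable_stage2_map(pgt, ipa, SZ_2M, pa,
 *				     KVM_PGTABLE_PROT_RWX, &cache, 0);
 */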

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *                                  without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 */
void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot,
				   enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to split.
 * @size:	Size of the range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as the capacity of @mc allows.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);
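
/*
 * Usage sketch (not part of the original header): a walker that counts
 * valid leaf entries over a range (names illustrative):
 *
 *	static int count_valid_cb(const struct kvm_pgtable_visit_ctx *ctx,
 *				  enum kvm_pgtable_walk_flags visit)
 *	{
 *		if (kvm_pte_valid(ctx->old))
 *			++*(u64 *)ctx->arg;
 *		return 0;
 *	}
 *
 *	u64 count = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_valid_cb,
 *		.arg	= &count,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *
 *	kvm_pgtable_walk(pgt, addr, size, &walker);
 */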

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, s8 *level);
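
/*
 * Illustrative sketch (not part of the original header): querying the
 * mapping granule backing an address, built only from declarations in
 * this header.
 */
static inline u64 kvm_example_leaf_granule(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte;
	s8 level;

	if (kvm_pgtable_get_leaf(pgt, addr, &pte, &level))
		return 0;

	/* Invalid entries are reported as leaves; treat them as unmapped. */
	return kvm_pte_valid(pte) ? kvm_granule_size(level) : 0;
}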

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
 *
 * @mmu:	Stage-2 KVM MMU struct
 * @addr:	The base intermediate physical address from which to invalidate
 * @size:	Size of the range from the base to invalidate
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t addr, size_t size);
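
/*
 * Usage sketch (not part of the original header): write-protecting a
 * range and then performing the TLB invalidation that the wrprotect
 * documentation leaves to the caller, here via kvm_tlb_flush_vmid_range().
 */
static inline int kvm_example_wrprotect_and_flush(struct kvm_pgtable *pgt,
						  u64 addr, u64 size)
{
	int ret = kvm_pgtable_stage2_wrprotect(pgt, addr, size);

	if (!ret)
		kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);

	return ret;
}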

#endif	/* __ARM64_KVM_PGTABLE_H__ */