// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_MAX_LEVELS		4U

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1U
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2U
#endif

#define kvm_lpa2_is_enabled()		false

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
		parange = ID_AA64MMFR0_EL1_PARANGE_MAX;

	return parange;
}

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)

#define KVM_PHYS_INVALID		(-1ULL)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa = pte & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16) {
		pa &= GENMASK(51, 48);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
	}

	return pte;
}

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}

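/*
 * Editor's illustrative sketch (not part of the kernel API): the two
 * helpers above are inverses for any page-aligned PA within the
 * supported range, including bits 51:48 on 64K pages. The name
 * example_pte_phys_round_trip is hypothetical.
 */
static inline bool example_pte_phys_round_trip(u64 pa)
{
	/* Bits below PAGE_SHIFT cannot be encoded in a PTE. */
	pa &= PAGE_MASK;

	return kvm_pte_to_phys(kvm_phys_to_pte(pa)) == pa;
}
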
static inline u64 kvm_granule_shift(u32 level)
{
	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(u32 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(u32 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	u32 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level < KVM_PGTABLE_MAX_LEVELS; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}

static inline bool kvm_is_block_size_supported(u64 size)
{
	/* IS_ALIGNED(size, size) computes !(size & (size - 1)) */
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}

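/*
 * Editor's illustrative sketch: how a caller might decide whether a
 * candidate mapping can use a block. On 4K pages this accepts 4KB,
 * 2MB and 1GB; on 16K pages 16KB and 32MB; on 64K pages 64KB and
 * 512MB. The name example_can_use_block is hypothetical.
 */
static inline bool example_can_use_block(u64 addr, u64 phys, u64 size)
{
	return kvm_is_block_size_supported(size) &&
	       IS_ALIGNED(addr | phys, size);
}
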
/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_unlinked_table)(void *addr, u32 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};

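/*
 * Editor's illustrative sketch: a partial initialiser, with
 * hypothetical example_* callbacks standing in for a real
 * implementation (the in-tree stage-2 mm_ops lives in
 * arch/arm64/kvm/mmu.c). Callbacks a user does not need, such as the
 * cache maintenance hooks here, may be left NULL if never invoked:
 *
 *	static struct kvm_pgtable_mm_ops example_mm_ops = {
 *		.zalloc_page	= example_zalloc_page,
 *		.phys_to_virt	= example_phys_to_virt,
 *		.virt_to_phys	= example_virt_to_phys,
 *		.get_page	= example_get_page,
 *		.put_page	= example_put_page,
 *		.page_count	= example_page_count,
 *	};
 */
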
/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);

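/*
 * Editor's illustrative sketch of a kvm_pgtable_force_pte_cb_t: a
 * hypothetical policy that forces page-granular mappings for
 * executable ranges, e.g. so permissions can later be adjusted per
 * page. The name example_force_pte_on_exec is not an in-tree symbol.
 */
static inline bool example_force_pte_on_exec(u64 addr, u64 end,
					     enum kvm_pgtable_prot prot)
{
	return prot & KVM_PGTABLE_PROT_X;
}
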
/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries without
 *					the TLB invalidation that
 *					break-before-make normally requires.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries without
 *					performing the cache maintenance
 *					operations that would normally be
 *					required.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	u64					addr;
	u64					end;
	u32					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	u32					ia_bits;
	u32					start_level;
	kvm_pteref_t				pgd;
	struct kvm_pgtable_mm_ops		*mm_ops;

	/* Stage-2 only */
	struct kvm_s2_mmu			*mmu;
	enum kvm_pgtable_stage2_flags		flags;
	kvm_pgtable_force_pte_cb_t		force_pte_cb;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);

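/*
 * Editor's illustrative sketch of typical hyp stage-1 usage, with a
 * hypothetical caller and mm_ops. Real callers also arrange locking
 * and tear the table down with kvm_pgtable_hyp_destroy() on error.
 */
static inline int example_hyp_map_page(struct kvm_pgtable *pgt,
				       struct kvm_pgtable_mm_ops *mm_ops,
				       u32 va_bits, u64 va, u64 phys)
{
	int ret;

	ret = kvm_pgtable_hyp_init(pgt, va_bits, mm_ops);
	if (ret)
		return ret;

	/* Map one page of normal memory, read-write, at va -> phys. */
	return kvm_pgtable_hyp_map(pgt, va, PAGE_SIZE, phys, PAGE_HYP);
}
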
/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2
 * @mmfr0:	Sanitised value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitised value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. The HA flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation.
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
	__kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);

/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:  Force mappings to PAGE_SIZE granularity.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and it is
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, u32 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it would recreate the exact same mapping or only change the access
 * permissions. Instead, the vCPU will exit one more time from the guest if
 * still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);

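/*
 * Editor's illustrative sketch of a stage-2 mapping call, with
 * hypothetical ipa/pfn/cache values. Real callers top up the
 * memcache beforehand and serialise against other table walkers
 * (e.g. via the MMU lock), or pass KVM_PGTABLE_WALK_SHARED:
 *
 *	ret = kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE,
 *				     pfn << PAGE_SHIFT,
 *				     KVM_PGTABLE_PROT_R, &cache, 0);
 */
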
/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *                                  without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	 Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	 Intermediate physical address from which to split.
 * @size:	 Size of the range.
 * @mc:		 Cache of pre-allocated and zeroed memory from which to allocate
 *		 page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as allowed by the capacity of @mc.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);

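/*
 * Editor's illustrative sketch: eagerly splitting blocks over a
 * memslot ahead of dirty logging, with a hypothetical pre-topped-up
 * cache. If @cache runs dry the split stops early and returns an
 * error, so callers size the cache for the worst case:
 *
 *	ret = kvm_pgtable_stage2_split(pgt, base_ipa, size, &cache);
 */
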
/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);

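/*
 * Editor's illustrative sketch: a minimal walker that counts valid
 * leaf entries in a range. The example_* names are hypothetical; a
 * real caller must also serialise against concurrent table updates,
 * or pass KVM_PGTABLE_WALK_SHARED where that is permitted.
 */
static inline int example_count_valid_cb(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	u64 *count = ctx->arg;

	if (kvm_pte_valid(ctx->old))
		(*count)++;

	return 0;
}

static inline int example_count_valid(struct kvm_pgtable *pgt, u64 addr,
				      u64 size, u64 *count)
{
	struct kvm_pgtable_walker walker = {
		.cb	= example_count_valid_cb,
		.arg	= count,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	*count = 0;
	return kvm_pgtable_walk(pgt, addr, size, &walker);
}
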
/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, u32 *level);

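/*
 * Editor's illustrative sketch: look up the PTE and level translating
 * a hypothetical @ipa, then derive the mapping size from the level:
 *
 *	kvm_pte_t pte;
 *	u32 level;
 *
 *	if (!kvm_pgtable_get_leaf(pgt, ipa, &pte, &level) &&
 *	    kvm_pte_valid(pte))
 *		size = kvm_granule_size(level);
 */
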
/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
 *
 * @mmu:	Stage-2 KVM MMU struct
 * @addr:	The base intermediate physical address from which to invalidate
 * @size:	Size of the range from the base to invalidate
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t addr, size_t size);
#endif	/* __ARM64_KVM_PGTABLE_H__ */