// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_FIRST_LEVEL		-1
#define KVM_PGTABLE_LAST_LEVEL		3

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2
#endif

#define kvm_lpa2_is_enabled()		system_supports_lpa2()

static inline u64 kvm_get_parange_max(void)
{
	if (kvm_lpa2_is_enabled() ||
	   (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
		return ID_AA64MMFR0_EL1_PARANGE_52;
	else
		return ID_AA64MMFR0_EL1_PARANGE_48;
}

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange_max = kvm_get_parange_max();
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	if (parange > parange_max)
		parange = parange_max;

	return parange;
}
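
/*
 * Illustrative sketch (not part of this header): a caller would typically
 * feed kvm_get_parange() the system-wide sanitised ID_AA64MMFR0_EL1 value,
 * so that the reported PA range is clamped to what KVM can actually use:
 *
 *	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 *	u64 parange = kvm_get_parange(mmfr0);
 *
 * Any PARange field above kvm_get_parange_max() is silently capped.
 */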

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)
#define KVM_PTE_ADDR_MASK_LPA2		GENMASK(49, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_50_LPA2		GENMASK(9, 8)

#define KVM_PHYS_INVALID		(-1ULL)

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 50)

#define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)

#define __KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)
#define __KVM_PTE_LEAF_ATTR_HI_S1_UXN	BIT(54)
#define __KVM_PTE_LEAF_ATTR_HI_S1_PXN	BIT(53)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ?	\
	   (__KVM_PTE_LEAF_ATTR_HI_S1_UXN |		\
	    __KVM_PTE_LEAF_ATTR_HI_S1_PXN) :		\
	    __KVM_PTE_LEAF_ATTR_HI_S1_XN; })

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	GENMASK(54, 53)

#define KVM_PTE_LEAF_ATTR_HI_S1_GP	BIT(50)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)
#define KVM_MAX_OWNER_ID		1

/*
 * Used to indicate a pte for which a 'break-before-make' sequence is in
 * progress.
 */
#define KVM_INVALID_PTE_LOCKED		BIT(10)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa;

	if (kvm_lpa2_is_enabled()) {
		pa = pte & KVM_PTE_ADDR_MASK_LPA2;
		pa |= FIELD_GET(KVM_PTE_ADDR_51_50_LPA2, pte) << 50;
	} else {
		pa = pte & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16)
			pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
	}

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte;

	if (kvm_lpa2_is_enabled()) {
		pte = pa & KVM_PTE_ADDR_MASK_LPA2;
		pa &= GENMASK(51, 50);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_50_LPA2, pa >> 50);
	} else {
		pte = pa & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16) {
			pa &= GENMASK(51, 48);
			pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
		}
	}

	return pte;
}
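
/*
 * Illustrative sketch (not part of this header): for any page-aligned
 * physical address within the supported PA range, kvm_phys_to_pte() and
 * kvm_pte_to_phys() round-trip, whether or not LPA2 is in use:
 *
 *	u64 pa = 0x12345ULL << PAGE_SHIFT;
 *	kvm_pte_t pte = kvm_phys_to_pte(pa);
 *
 *	WARN_ON(kvm_pte_to_phys(pte) != pa);
 */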

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}

static inline u64 kvm_granule_shift(s8 level)
{
	/* Assumes KVM_PGTABLE_LAST_LEVEL is 3 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(s8 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(s8 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	s8 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}

static inline bool kvm_is_block_size_supported(u64 size)
{
	/* IS_ALIGNED(size, size) is true iff size is zero or a power of two */
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}
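
/*
 * Illustrative sketch (not part of this header): with 4K pages, the
 * supported granule sizes are 1GiB, 2MiB and 4KiB, so a 2MiB block passes
 * the check while an arbitrary 64KiB size does not:
 *
 *	WARN_ON(!kvm_is_block_size_supported(SZ_2M));
 *	WARN_ON(kvm_is_block_size_supported(SZ_64K));	// 4K granule only
 */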

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_unlinked_table)(void *addr, s8 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};
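
/*
 * Illustrative sketch (not part of this header): a minimal kernel-context
 * set of callbacks could sit on top of the page allocator and the linear
 * map. Only a subset is shown; a real user also wires up refcounting and
 * cache maintenance (see the stage-2 mm_ops in the arm64 KVM code):
 *
 *	static void *example_zalloc_page(void *arg)
 *	{
 *		return (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 *	}
 *
 *	static void *example_phys_to_virt(phys_addr_t phys)
 *	{
 *		return __va(phys);
 *	}
 *
 *	static struct kvm_pgtable_mm_ops example_mm_ops = {
 *		.zalloc_page	= example_zalloc_page,
 *		.phys_to_virt	= example_phys_to_virt,
 *	};
 */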

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_UX:	Unprivileged execute permission.
 * @KVM_PGTABLE_PROT_PX:	Privileged execute permission.
 * @KVM_PGTABLE_PROT_X:		Privileged and unprivileged execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_NORMAL_NC:	Normal noncacheable attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_PX			= BIT(0),
	KVM_PGTABLE_PROT_UX			= BIT(1),
	KVM_PGTABLE_PROT_X			= KVM_PGTABLE_PROT_PX |
						  KVM_PGTABLE_PROT_UX,
	KVM_PGTABLE_PROT_W			= BIT(2),
	KVM_PGTABLE_PROT_R			= BIT(3),

	KVM_PGTABLE_PROT_DEVICE			= BIT(4),
	KVM_PGTABLE_PROT_NORMAL_NC		= BIT(5),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);
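
/*
 * Illustrative sketch (not part of this header): a force_pte callback lets
 * the table code veto block mappings for a range. For instance, a
 * hypothetical policy that only allows block mappings for plain RWX memory:
 *
 *	static bool example_force_pte_cb(u64 addr, u64 end,
 *					 enum kvm_pgtable_prot prot)
 *	{
 *		return prot != PKVM_HOST_MEM_PROT;
 *	}
 *
 * This mirrors the shape of the callbacks used by the protected-KVM host
 * stage-2 code.
 */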

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_IGNORE_EAGAIN:	Don't terminate the walk early if
 *					the walker returns -EAGAIN.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without the TLB invalidation that
 *					break-before-make would normally
 *					require.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
 *					without the cache maintenance
 *					operations that would normally be
 *					required.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_IGNORE_EAGAIN		= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	u64					addr;
	u64					end;
	s8					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};
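
/*
 * Illustrative sketch (not part of this header): a visitor that counts
 * valid leaf entries, passing its accumulator through @arg, paired with a
 * walker structure that requests leaf visits only:
 *
 *	static int example_count_cb(const struct kvm_pgtable_visit_ctx *ctx,
 *				    enum kvm_pgtable_walk_flags visit)
 *	{
 *		if (kvm_pte_valid(ctx->old))
 *			(*(u64 *)ctx->arg)++;
 *
 *		return 0;
 *	}
 *
 *	u64 nr_valid = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= example_count_cb,
 *		.arg	= &nr_valid,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 */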

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
{
	return rcu_dereference_raw(pteref);
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	union {
		struct rb_root_cached			pkvm_mappings;
		struct {
			u32				ia_bits;
			s8				start_level;
			kvm_pteref_t			pgd;
			struct kvm_pgtable_mm_ops	*mm_ops;

			/* Stage-2 only */
			enum kvm_pgtable_stage2_flags	flags;
			kvm_pgtable_force_pte_cb_t	force_pte_cb;
		};
	};
	struct kvm_s2_mmu				*mmu;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
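
/*
 * Illustrative sketch (not part of this header): a typical hyp stage-1
 * lifecycle initialises the table, installs a read-write mapping and tears
 * the table down again. example_mm_ops is the hypothetical callback set
 * sketched earlier; va/pa stand in for a suitable virtual/physical pair:
 *
 *	struct kvm_pgtable pgt;
 *	int ret;
 *
 *	ret = kvm_pgtable_hyp_init(&pgt, 48, &example_mm_ops);
 *	if (!ret) {
 *		ret = kvm_pgtable_hyp_map(&pgt, va, PAGE_SIZE, pa, PAGE_HYP);
 *		kvm_pgtable_hyp_destroy(&pgt);
 *	}
 */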

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. HA Flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
					  struct kvm_pgtable_mm_ops *mm_ops)
{
	return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);
}
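
/*
 * Illustrative sketch (not part of this header): callers with no special
 * requirements use the kvm_pgtable_stage2_init() wrapper above; passing
 * flags and a force_pte callback goes through the double-underscore
 * variant. example_force_pte_cb and example_mm_ops are the hypothetical
 * helpers sketched earlier; mmu is the caller's struct kvm_s2_mmu:
 *
 *	ret = __kvm_pgtable_stage2_init(&pgt, mmu, &example_mm_ops,
 *					KVM_PGTABLE_S2_IDMAP,
 *					example_force_pte_cb);
 */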

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address of the range to destroy.
 * @size:	Size of the range to destroy.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
				      u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * It is assumed that the rest of the page-table is freed before this operation.
 */
void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);

/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:	Force mappings to PAGE_SIZE granularity.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and it is
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, s8 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it would recreate the exact same mapping or only change the access
 * permissions. Instead, the vCPU will take another exit from the guest if
 * still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
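
/*
 * Illustrative sketch (not part of this header): mapping one page of guest
 * memory read-write at a faulting IPA, allocating table pages from a
 * pre-filled memcache so the walk itself never sleeps. fault_ipa, pfn and
 * memcache are hypothetical caller-provided values:
 *
 *	ret = kvm_pgtable_stage2_map(&pgt, fault_ipa, PAGE_SIZE,
 *				     pfn << PAGE_SHIFT, KVM_PGTABLE_PROT_RW,
 *				     memcache, KVM_PGTABLE_WALK_SHARED);
 */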

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *				    without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
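
/*
 * Illustrative sketch (not part of this header): write-protecting a range
 * for dirty logging, followed by the range invalidation the caller owes.
 * kvm_tlb_flush_vmid_range() is declared at the end of this header;
 * gfn_start and nr_pages are hypothetical caller-provided values:
 *
 *	ret = kvm_pgtable_stage2_wrprotect(&pgt, gfn_start << PAGE_SHIFT,
 *					   nr_pages * PAGE_SIZE);
 *	if (!ret)
 *		kvm_tlb_flush_vmid_range(pgt.mmu, gfn_start << PAGE_SHIFT,
 *					 nr_pages * PAGE_SIZE);
 */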

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 */
void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot,
				   enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to split.
 * @size:	Size of the range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as the capacity of @mc allows.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);
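
/*
 * Illustrative sketch (not part of this header): driving a walk over the
 * whole input address space with the leaf-counting walker sketched next to
 * struct kvm_pgtable_walker above:
 *
 *	ret = kvm_pgtable_walk(&pgt, 0, 1ULL << pgt.ia_bits, &walker);
 *	if (!ret)
 *		pr_info("%llu valid leaf entries\n", nr_valid);
 */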

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, s8 *level);

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
 *
 * @mmu:	Stage-2 KVM MMU struct
 * @addr:	The base Intermediate physical address from which to invalidate
 * @size:	Size of the range from the base to invalidate
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
			      phys_addr_t addr, size_t size);
#endif	/* __ARM64_KVM_PGTABLE_H__ */