// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
 * No bombay mix was harmed in the writing of this file.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

struct kvm_pgtable_walk_data {
	struct kvm_pgtable_walker	*walker;

	const u64			start;
	u64				addr;
	const u64			end;
};

static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx)
{
	return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
}

static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
{
	return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
}

static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
{
	u64 granule = kvm_granule_size(ctx->level);

	if (!kvm_level_supports_block_mapping(ctx->level))
		return false;

	if (granule > (ctx->end - ctx->addr))
		return false;

	if (!IS_ALIGNED(phys, granule))
		return false;

	return IS_ALIGNED(ctx->addr, granule);
}

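/*
 * Index of @data->addr within the table at @level: shift out the bits
 * translated by the lower levels and mask with the number of entries in
 * a table page (BIT(PAGE_SHIFT - 3), i.e. PAGE_SIZE / sizeof(kvm_pte_t)).
 */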
static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, s8 level)
{
	u64 shift = kvm_granule_shift(level);
	u64 mask = BIT(PAGE_SHIFT - 3) - 1;

	return (data->addr >> shift) & mask;
}

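/*
 * Index of the page, within a set of concatenated initial-level tables,
 * that translates @addr. Shifting by kvm_granule_shift(start_level - 1)
 * treats the concatenated tables as a single notional level above the
 * start level.
 */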
static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
	u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
	u64 mask = BIT(pgt->ia_bits) - 1;

	return (addr & mask) >> shift;
}

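/* Number of concatenated initial-level pages needed to cover @ia_bits. */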
static u32 kvm_pgd_pages(u32 ia_bits, s8 start_level)
{
	struct kvm_pgtable pgt = {
		.ia_bits	= ia_bits,
		.start_level	= start_level,
	};

	return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}

static bool kvm_pte_table(kvm_pte_t pte, s8 level)
{
	if (level == KVM_PGTABLE_LAST_LEVEL)
		return false;

	if (!kvm_pte_valid(pte))
		return false;

	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

static void kvm_clear_pte(kvm_pte_t *ptep)
{
	WRITE_ONCE(*ptep, 0);
}

static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
{
	kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));

	pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
	pte |= KVM_PTE_VALID;
	return pte;
}

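/*
 * Leaf entries at the last level use the PAGE descriptor type; a leaf at
 * any higher level is a BLOCK descriptor covering kvm_granule_size(level).
 */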
static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, s8 level)
{
	kvm_pte_t pte = kvm_phys_to_pte(pa);
	u64 type = (level == KVM_PGTABLE_LAST_LEVEL) ? KVM_PTE_TYPE_PAGE :
						       KVM_PTE_TYPE_BLOCK;

	pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
	pte |= FIELD_PREP(KVM_PTE_TYPE, type);
	pte |= KVM_PTE_VALID;

	return pte;
}

static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
{
	return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
}

static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
				  const struct kvm_pgtable_visit_ctx *ctx,
				  enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_walker *walker = data->walker;

	/* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
	WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
	return walker->cb(ctx, visit);
}

static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
				      int r)
{
	/*
	 * Visitor callbacks return EAGAIN when the conditions that led to a
	 * fault are no longer reflected in the page tables due to a race to
	 * update a PTE. In the context of a fault handler this is interpreted
	 * as a signal to retry guest execution.
	 *
	 * Ignore the return code altogether for walkers outside a fault handler
	 * (e.g. write protecting a range of memory) and chug along with the
	 * page table walk.
	 */
	if (r == -EAGAIN)
		return walker->flags & KVM_PGTABLE_WALK_IGNORE_EAGAIN;

	return !r;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level);

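/*
 * Visit a single entry: invoke the TABLE_PRE callback for table entries
 * or the LEAF callback for leaves, re-read the PTE so that a callback
 * which installed or replaced a table can be descended into, then recurse
 * into the child table and finally invoke the TABLE_POST callback on the
 * way back up.
 */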
static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
				      struct kvm_pgtable_mm_ops *mm_ops,
				      kvm_pteref_t pteref, s8 level)
{
	enum kvm_pgtable_walk_flags flags = data->walker->flags;
	kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
	struct kvm_pgtable_visit_ctx ctx = {
		.ptep	= ptep,
		.old	= READ_ONCE(*ptep),
		.arg	= data->walker->arg,
		.mm_ops	= mm_ops,
		.start	= data->start,
		.addr	= data->addr,
		.end	= data->end,
		.level	= level,
		.flags	= flags,
	};
	int ret = 0;
	bool reload = false;
	kvm_pteref_t childp;
	bool table = kvm_pte_table(ctx.old, level);

	if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
		reload = true;
	}

	if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
		reload = true;
	}

	/*
	 * Reload the page table after invoking the walker callback for leaf
	 * entries or after pre-order traversal, to allow the walker to descend
	 * into a newly installed or replaced table.
	 */
	if (reload) {
		ctx.old = READ_ONCE(*ptep);
		table = kvm_pte_table(ctx.old, level);
	}

	if (!kvm_pgtable_walk_continue(data->walker, ret))
		goto out;

	if (!table) {
		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
		data->addr += kvm_granule_size(level);
		goto out;
	}

	childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
	ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
	if (!kvm_pgtable_walk_continue(data->walker, ret))
		goto out;

	if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);

out:
	if (kvm_pgtable_walk_continue(data->walker, ret))
		return 0;

	return ret;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level)
{
	u32 idx;
	int ret = 0;

	if (WARN_ON_ONCE(level < KVM_PGTABLE_FIRST_LEVEL ||
			 level > KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

	for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
		kvm_pteref_t pteref = &pgtable[idx];

		if (data->addr >= data->end)
			break;

		ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
		if (ret)
			break;
	}

	return ret;
}

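/*
 * The PGD may comprise several concatenated pages at the start level;
 * stride through it in PTRS_PER_PTE-sized chunks, one walk per PGD page.
 */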
static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
{
	u32 idx;
	int ret = 0;
	u64 limit = BIT(pgt->ia_bits);

	if (data->addr > limit || data->end > limit)
		return -ERANGE;

	if (!pgt->pgd)
		return -EINVAL;

	for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
		kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];

		ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
		if (ret)
			break;
	}

	return ret;
}

int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker)
{
	struct kvm_pgtable_walk_data walk_data = {
		.start	= ALIGN_DOWN(addr, PAGE_SIZE),
		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
		.end	= PAGE_ALIGN(walk_data.addr + size),
		.walker	= walker,
	};
	int r;

	r = kvm_pgtable_walk_begin(walker);
	if (r)
		return r;

	r = _kvm_pgtable_walk(pgt, &walk_data);
	kvm_pgtable_walk_end(walker);

	return r;
}

struct leaf_walk_data {
	kvm_pte_t	pte;
	s8		level;
};

static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
		       enum kvm_pgtable_walk_flags visit)
{
	struct leaf_walk_data *data = ctx->arg;

	data->pte   = ctx->old;
	data->level = ctx->level;

	return 0;
}

int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, s8 *level)
{
	struct leaf_walk_data data;
	struct kvm_pgtable_walker walker = {
		.cb	= leaf_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &data,
	};
	int ret;

	ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
			       PAGE_SIZE, &walker);
	if (!ret) {
		if (ptep)
			*ptep  = data.pte;
		if (level)
			*level = data.level;
	}

	return ret;
}

struct hyp_map_data {
	const u64	phys;
	kvm_pte_t	attr;
};

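/*
 * Convert the generic KVM_PGTABLE_PROT_* flags into a hyp stage-1 leaf
 * attribute set: memory type, shareability, access permissions and (for
 * hVHE) the split PXN/UXN execute-never controls.
 */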
static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
	kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
	u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
					       KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;

	if (!(prot & KVM_PGTABLE_PROT_R))
		return -EINVAL;

	if (!cpus_have_final_cap(ARM64_KVM_HVHE))
		prot &= ~KVM_PGTABLE_PROT_UX;

	if (prot & KVM_PGTABLE_PROT_X) {
		if (prot & KVM_PGTABLE_PROT_W)
			return -EINVAL;

		if (device)
			return -EINVAL;

		if (system_supports_bti_kernel())
			attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
	}

	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
		if (!(prot & KVM_PGTABLE_PROT_PX))
			attr |= KVM_PTE_LEAF_ATTR_HI_S1_PXN;
		if (!(prot & KVM_PGTABLE_PROT_UX))
			attr |= KVM_PTE_LEAF_ATTR_HI_S1_UXN;
	} else {
		if (!(prot & KVM_PGTABLE_PROT_PX))
			attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
	}

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
	if (!kvm_lpa2_is_enabled())
		attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
	attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
	*ptep = attr;

	return 0;
}

enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
{
	enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
	u32 ap;

	if (!kvm_pte_valid(pte))
		return prot;

	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
		if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_PXN))
			prot |= KVM_PGTABLE_PROT_PX;
		if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_UXN))
			prot |= KVM_PGTABLE_PROT_UX;
	} else {
		if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
			prot |= KVM_PGTABLE_PROT_PX;
	}

	ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
	if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
		prot |= KVM_PGTABLE_PROT_R;
	else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
		prot |= KVM_PGTABLE_PROT_RW;

	return prot;
}

static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
				    struct hyp_map_data *data)
{
	u64 phys = data->phys + (ctx->addr - ctx->start);
	kvm_pte_t new;

	if (!kvm_block_mapping_supported(ctx, phys))
		return false;

	new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
	if (ctx->old == new)
		return true;
	if (!kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);
	else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
		return false;

	smp_store_release(ctx->ptep, new);
	return true;
}

static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
			  enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t *childp, new;
	struct hyp_map_data *data = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (hyp_map_walker_try_leaf(ctx, data))
		return 0;

	if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

	childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!childp)
		return -ENOMEM;

	new = kvm_init_table_pte(childp, mm_ops);
	mm_ops->get_page(ctx->ptep);
	smp_store_release(ctx->ptep, new);

	return 0;
}

int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot)
{
	int ret;
	struct hyp_map_data map_data = {
		.phys	= ALIGN_DOWN(phys, PAGE_SIZE),
	};
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_map_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &map_data,
	};

	ret = hyp_set_prot_attr(prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	isb();
	return ret;
}

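/*
 * Tear down a leaf (accumulating the unmapped size in @unmapped) or, in
 * post-order, free a table page once this walk holds its last reference.
 */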
static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
			    enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t *childp = NULL;
	u64 granule = kvm_granule_size(ctx->level);
	u64 *unmapped = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))
		return -EINVAL;

	if (kvm_pte_table(ctx->old, ctx->level)) {
		childp = kvm_pte_follow(ctx->old, mm_ops);

		if (mm_ops->page_count(childp) != 1)
			return 0;

		kvm_clear_pte(ctx->ptep);
		dsb(ishst);
		__tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
	} else {
		if (ctx->end - ctx->addr < granule)
			return -EINVAL;

		kvm_clear_pte(ctx->ptep);
		dsb(ishst);
		__tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
		*unmapped += granule;
	}

	dsb(ish);
	isb();
	mm_ops->put_page(ctx->ptep);

	if (childp)
		mm_ops->put_page(childp);

	return 0;
}

u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	u64 unmapped = 0;
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_unmap_walker,
		.arg	= &unmapped,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	if (!pgt->mm_ops->page_count)
		return 0;

	kvm_pgtable_walk(pgt, addr, size, &walker);
	return unmapped;
}

int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops)
{
	s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 -
			 ARM64_HW_PGTABLE_LEVELS(va_bits);

	if (start_level < KVM_PGTABLE_FIRST_LEVEL ||
	    start_level > KVM_PGTABLE_LAST_LEVEL)
		return -EINVAL;

	pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits		= va_bits;
	pgt->start_level	= start_level;
	pgt->mm_ops		= mm_ops;
	pgt->mmu		= NULL;
	pgt->force_pte_cb	= NULL;

	return 0;
}

static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
			   enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	mm_ops->put_page(ctx->ptep);

	if (kvm_pte_table(ctx->old, ctx->level))
		mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));

	return 0;
}

void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
	pgt->pgd = NULL;
}

struct stage2_map_data {
	const u64		phys;
	kvm_pte_t		attr;
	u8			owner_id;

	kvm_pte_t		*anchor;
	kvm_pte_t		*childp;

	struct kvm_s2_mmu	*mmu;
	void			*memcache;

	/* Force mappings to page granularity */
	bool			force_pte;

	/* Walk should update owner_id only */
	bool			annotation;
};

u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
{
	u64 vtcr = VTCR_EL2_FLAGS;
	s8 lvls;

	vtcr |= FIELD_PREP(VTCR_EL2_PS, kvm_get_parange(mmfr0));
	vtcr |= FIELD_PREP(VTCR_EL2_T0SZ, (UL(64) - phys_shift));
	/*
	 * Use a minimum 2 level page table to prevent splitting
	 * host PMD huge pages at stage2.
	 */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;

	/*
	 * When LPA2 is enabled, the HW supports an extra level of translation
	 * (for 5 in total) when using 4K pages. It also introduces VTCR_EL2.SL2
	 * as an addition to SL0, to enable encoding this extra start level.
	 * However, since we always use concatenated pages for the first level
	 * lookup, we will never need this extra level and therefore do not need
	 * to touch SL2.
	 */
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable the Hardware Access Flag management, unconditionally
	 * on all CPUs. In systems that have asymmetric support for the feature
	 * this allows KVM to leverage hardware support on the subset of cores
	 * that implement the feature.
	 *
	 * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
	 * hardware) on implementations that do not advertise support for the
	 * feature. As such, setting HA unconditionally is safe, unless you
	 * happen to be running on a design that has unadvertised support for
	 * HAFDBS. Here be dragons.
	 */
	if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		vtcr |= VTCR_EL2_HA;
#endif /* CONFIG_ARM64_HW_AFDBM */

	if (kvm_lpa2_is_enabled())
		vtcr |= VTCR_EL2_DS;

	/* Set the vmid bits */
	vtcr |= (get_vmid_bits(mmfr1) == 16) ? VTCR_EL2_VS : 0;

	return vtcr;
}

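/*
 * Invalidate stage-2 TLB entries by VMID for [addr, addr + size). Without
 * FEAT_TLBIRANGE this falls back to a full VMID flush; otherwise it issues
 * range-based invalidations in chunks of at most MAX_TLBI_RANGE_PAGES.
 */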
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
			      phys_addr_t addr, size_t size)
{
	unsigned long pages, inval_pages;

	if (!system_supports_tlb_range()) {
		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
		return;
	}

	pages = size >> PAGE_SHIFT;
	while (pages > 0) {
		inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
		kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);

		addr += inval_pages << PAGE_SHIFT;
		pages -= inval_pages;
	}
}

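/*
 * Pick the stage-2 memory attribute encoding for @attr, or the stage-1
 * alias (AS_S1) when the page-table was created with KVM_PGTABLE_S2_AS_S1.
 */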
#define KVM_S2_MEMATTR(pgt, attr)				\
	({							\
		kvm_pte_t __attr;				\
								\
		if ((pgt)->flags & KVM_PGTABLE_S2_AS_S1)	\
			__attr = PAGE_S2_MEMATTR(AS_S1);	\
		else						\
			__attr = PAGE_S2_MEMATTR(attr);		\
								\
		__attr;						\
	})

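/*
 * Encode the stage-2 XN[1:0] permission bits from the requested prot, as
 * laid out in the if/else ladder below. Without FEAT_XNX, only 0b00
 * (executable) and 0b10 (execute-never) exist, so asymmetric PX/UX
 * requests must be rejected.
 */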
static int stage2_set_xn_attr(enum kvm_pgtable_prot prot, kvm_pte_t *attr)
{
	bool px, ux;
	u8 xn;

	px = prot & KVM_PGTABLE_PROT_PX;
	ux = prot & KVM_PGTABLE_PROT_UX;

	if (!cpus_have_final_cap(ARM64_HAS_XNX) && px != ux)
		return -EINVAL;

	if (px && ux)
		xn = 0b00;
	else if (!px && ux)
		xn = 0b01;
	else if (!px && !ux)
		xn = 0b10;
	else
		xn = 0b11;

	*attr &= ~KVM_PTE_LEAF_ATTR_HI_S2_XN;
	*attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, xn);
	return 0;
}

static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
				kvm_pte_t *ptep)
{
	kvm_pte_t attr;
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
	int r;

	switch (prot & (KVM_PGTABLE_PROT_DEVICE |
			KVM_PGTABLE_PROT_NORMAL_NC)) {
	case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC:
		return -EINVAL;
	case KVM_PGTABLE_PROT_DEVICE:
		if (prot & KVM_PGTABLE_PROT_X)
			return -EINVAL;
		attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
		break;
	case KVM_PGTABLE_PROT_NORMAL_NC:
		if (prot & KVM_PGTABLE_PROT_X)
			return -EINVAL;
		attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
		break;
	default:
		attr = KVM_S2_MEMATTR(pgt, NORMAL);
	}

	r = stage2_set_xn_attr(prot, &attr);
	if (r)
		return r;

	if (prot & KVM_PGTABLE_PROT_R)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	if (!kvm_lpa2_is_enabled())
		attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);

	attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
	attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
	*ptep = attr;

	return 0;
}

enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
{
	enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;

	if (!kvm_pte_valid(pte))
		return prot;

	if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
		prot |= KVM_PGTABLE_PROT_R;
	if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
		prot |= KVM_PGTABLE_PROT_W;

	switch (FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte)) {
	case 0b00:
		prot |= KVM_PGTABLE_PROT_PX | KVM_PGTABLE_PROT_UX;
		break;
	case 0b01:
		prot |= KVM_PGTABLE_PROT_UX;
		break;
	case 0b11:
		prot |= KVM_PGTABLE_PROT_PX;
		break;
	default:
		break;
	}

	return prot;
}

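/*
 * A valid-to-valid transition that only changes the stage-2 permission
 * bits does not require a PTE update here; permission relaxation is
 * handled separately by kvm_pgtable_stage2_relax_perms().
 */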
static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
	if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
		return true;

	return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
}

static bool stage2_pte_is_counted(kvm_pte_t pte)
{
	/*
	 * The refcount tracks valid entries as well as invalid entries that
	 * assign ownership of a page to an entity other than the page-table
	 * owner, whose id is 0.
	 */
	return !!pte;
}

static bool stage2_pte_is_locked(kvm_pte_t pte)
{
	return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
}

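/*
 * Exclusive walkers can update the PTE with a plain store; shared walkers
 * must cmpxchg() against the value observed when the entry was visited so
 * that a racing update is detected and retried.
 */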
static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
{
	if (!kvm_pgtable_walk_shared(ctx)) {
		WRITE_ONCE(*ctx->ptep, new);
		return true;
	}

	return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
}

/**
 * stage2_try_break_pte() - Invalidates a pte according to the
 *			    'break-before-make' requirements of the
 *			    architecture.
 *
 * @ctx: context of the visited pte.
 * @mmu: stage-2 mmu
 *
 * Returns: true if the pte was successfully broken.
 *
 * If the removed pte was valid, performs the necessary serialization and TLB
 * invalidation for the old value. For counted ptes, drops the reference count
 * on the containing table page.
 */
static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
				 struct kvm_s2_mmu *mmu)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (stage2_pte_is_locked(ctx->old)) {
		/*
		 * Should never occur if this walker has exclusive access to the
		 * page tables.
		 */
		WARN_ON(!kvm_pgtable_walk_shared(ctx));
		return false;
	}

	if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
		return false;

	if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) {
		/*
		 * Perform the appropriate TLB invalidation based on the
		 * evicted pte value (if any).
		 */
		if (kvm_pte_table(ctx->old, ctx->level)) {
			u64 size = kvm_granule_size(ctx->level);
			u64 addr = ALIGN_DOWN(ctx->addr, size);

			kvm_tlb_flush_vmid_range(mmu, addr, size);
		} else if (kvm_pte_valid(ctx->old)) {
			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
				     ctx->addr, ctx->level);
		}
	}

	if (stage2_pte_is_counted(ctx->old))
		mm_ops->put_page(ctx->ptep);

	return true;
}

static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	WARN_ON(!stage2_pte_is_locked(*ctx->ptep));

	if (stage2_pte_is_counted(new))
		mm_ops->get_page(ctx->ptep);

	smp_store_release(ctx->ptep, new);
}

static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
{
	/*
	 * If FEAT_TLBIRANGE is implemented, defer the individual
	 * TLB invalidations until the entire walk is finished, and
	 * then use the range-based TLBI instructions to do the
	 * invalidations. Condition deferred TLB invalidation on the
	 * system supporting FWB as the optimization is entirely
	 * pointless when the unmap walker needs to perform CMOs.
	 */
	return system_supports_tlb_range() && cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
}

static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
				 struct kvm_s2_mmu *mmu,
				 struct kvm_pgtable_mm_ops *mm_ops)
{
	struct kvm_pgtable *pgt = ctx->arg;

	/*
	 * Clear the existing PTE, and perform break-before-make if it was
	 * valid. Depending on system support, the TLB maintenance may be
	 * deferred until the entire unmap walk has completed.
	 */
	if (kvm_pte_valid(ctx->old)) {
		kvm_clear_pte(ctx->ptep);

		if (kvm_pte_table(ctx->old, ctx->level)) {
			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
				     TLBI_TTL_UNKNOWN);
		} else if (!stage2_unmap_defer_tlb_flush(pgt)) {
			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
				     ctx->level);
		}
	}

	mm_ops->put_page(ctx->ptep);
}

static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
{
	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;

	return kvm_pte_valid(pte) && memattr == KVM_S2_MEMATTR(pgt, NORMAL);
}

static bool stage2_pte_executable(kvm_pte_t pte)
{
	return kvm_pte_valid(pte) && !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}

static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
				       const struct stage2_map_data *data)
{
	u64 phys = data->phys;

	/* Work out the correct PA based on how far the walk has gotten */
	return phys + (ctx->addr - ctx->start);
}

static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
					struct stage2_map_data *data)
{
	u64 phys = stage2_map_walker_phys_addr(ctx, data);

	if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
		return false;

	if (data->annotation)
		return true;

	return kvm_block_mapping_supported(ctx, phys);
}

static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
				      struct stage2_map_data *data)
{
	kvm_pte_t new;
	u64 phys = stage2_map_walker_phys_addr(ctx, data);
	u64 granule = kvm_granule_size(ctx->level);
	struct kvm_pgtable *pgt = data->mmu->pgt;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!stage2_leaf_mapping_allowed(ctx, data))
		return -E2BIG;

	if (!data->annotation)
		new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
	else
		new = kvm_init_invalid_leaf_owner(data->owner_id);

	/*
	 * Skip updating the PTE if we are trying to recreate the exact
	 * same mapping or only change the access permissions. Instead,
	 * the vCPU will exit one more time from guest if still needed
	 * and then go through the path of relaxing permissions.
	 */
	if (!stage2_pte_needs_update(ctx->old, new))
		return -EAGAIN;

	/* If we're only changing software bits, then store them and go! */
	if (!kvm_pgtable_walk_shared(ctx) &&
	    !((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW)) {
		bool old_is_counted = stage2_pte_is_counted(ctx->old);

		if (old_is_counted != stage2_pte_is_counted(new)) {
			if (old_is_counted)
				mm_ops->put_page(ctx->ptep);
			else
				mm_ops->get_page(ctx->ptep);
		}
		WARN_ON_ONCE(!stage2_try_set_pte(ctx, new));
		return 0;
	}

	if (!stage2_try_break_pte(ctx, data->mmu))
		return -EAGAIN;

	/* Perform CMOs before installation of the guest stage-2 PTE */
	if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc &&
	    stage2_pte_cacheable(pgt, new))
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
					       granule);

	if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou &&
	    stage2_pte_executable(new))
		mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);

	stage2_make_pte(ctx, new);

	return 0;
}

static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
				     struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
	int ret;

	if (!stage2_leaf_mapping_allowed(ctx, data))
		return 0;

	ret = stage2_map_walker_try_leaf(ctx, data);
	if (ret)
		return ret;

	mm_ops->free_unlinked_table(childp, ctx->level);
	return 0;
}

static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
				struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp, new;
	int ret;

	ret = stage2_map_walker_try_leaf(ctx, data);
	if (ret != -E2BIG)
		return ret;

	if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

	if (!data->memcache)
		return -ENOMEM;

	childp = mm_ops->zalloc_page(data->memcache);
	if (!childp)
		return -ENOMEM;

	if (!stage2_try_break_pte(ctx, data->mmu)) {
		mm_ops->put_page(childp);
		return -EAGAIN;
	}

	/*
	 * If we've run into an existing block mapping then replace it with
	 * a table. Accesses beyond 'end' that fall within the new table
	 * will be mapped lazily.
	 */
	new = kvm_init_table_pte(childp, mm_ops);
	stage2_make_pte(ctx, new);

	return 0;
}

/*
 * The TABLE_PRE callback runs for table entries on the way down, looking
 * for table entries which we could conceivably replace with a block entry
 * for this mapping. If it finds one it replaces the entry and calls
 * kvm_pgtable_mm_ops::free_unlinked_table() to tear down the detached table.
 *
 * Otherwise, the LEAF callback performs the mapping at the existing leaves
 * instead.
 */
static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
			     enum kvm_pgtable_walk_flags visit)
{
	struct stage2_map_data *data = ctx->arg;

	switch (visit) {
	case KVM_PGTABLE_WALK_TABLE_PRE:
		return stage2_map_walk_table_pre(ctx, data);
	case KVM_PGTABLE_WALK_LEAF:
		return stage2_map_walk_leaf(ctx, data);
	default:
		return -EINVAL;
	}
}

int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= ALIGN_DOWN(phys, PAGE_SIZE),
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.force_pte	= pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= flags |
				  KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF,
		.arg		= &map_data,
	};

	if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
		return -EINVAL;

	ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	return ret;
}

int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id)
{
	int ret;
	struct stage2_map_data map_data = {
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.owner_id	= owner_id,
		.force_pte	= true,
		.annotation	= true,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF,
		.arg		= &map_data,
	};

	if (owner_id > KVM_MAX_OWNER_ID)
		return -EINVAL;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	return ret;
}

static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
			       enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable *pgt = ctx->arg;
	struct kvm_s2_mmu *mmu = pgt->mmu;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp = NULL;
	bool need_flush = false;

	if (!kvm_pte_valid(ctx->old)) {
		if (stage2_pte_is_counted(ctx->old)) {
			kvm_clear_pte(ctx->ptep);
			mm_ops->put_page(ctx->ptep);
		}
		return 0;
	}

	if (kvm_pte_table(ctx->old, ctx->level)) {
		childp = kvm_pte_follow(ctx->old, mm_ops);

		if (mm_ops->page_count(childp) != 1)
			return 0;
	} else if (stage2_pte_cacheable(pgt, ctx->old)) {
		need_flush = !cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
	}

	/*
	 * This is similar to the map() path in that we unmap the entire
	 * block entry and rely on the remaining portions being faulted
	 * back lazily.
	 */
	stage2_unmap_put_pte(ctx, mmu, mm_ops);

	if (need_flush && mm_ops->dcache_clean_inval_poc)
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
					       kvm_granule_size(ctx->level));

	if (childp)
		mm_ops->put_page(childp);

	return 0;
}

int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	int ret;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_unmap_walker,
		.arg	= pgt,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	if (stage2_unmap_defer_tlb_flush(pgt))
		/* Perform the deferred TLB invalidations */
		kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);

	return ret;
}

struct stage2_attr_data {
	kvm_pte_t	attr_set;
	kvm_pte_t	attr_clr;
	kvm_pte_t	pte;
	s8		level;
};

static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
			      enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t pte = ctx->old;
	struct stage2_attr_data *data = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))
		return -EAGAIN;

	data->level = ctx->level;
	data->pte = pte;
	pte &= ~data->attr_clr;
	pte |= data->attr_set;

	/*
	 * We may race with the CPU trying to set the access flag here,
	 * but worst-case the access flag update gets lost and will be
	 * set on the next access instead.
	 */
	if (data->pte != pte) {
		/*
		 * Invalidate instruction cache before updating the guest
		 * stage-2 PTE if we are going to add executable permission.
		 */
		if (mm_ops->icache_inval_pou &&
		    stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
			mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
						 kvm_granule_size(ctx->level));

		if (!stage2_try_set_pte(ctx, pte))
			return -EAGAIN;
	}

	return 0;
}

static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
				    u64 size, kvm_pte_t attr_set,
				    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
				    s8 *level, enum kvm_pgtable_walk_flags flags)
{
	int ret;
	kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
	struct stage2_attr_data data = {
		.attr_set	= attr_set & attr_mask,
		.attr_clr	= attr_clr & attr_mask,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_attr_walker,
		.arg		= &data,
		.flags		= flags | KVM_PGTABLE_WALK_LEAF,
	};

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	if (ret)
		return ret;

	if (orig_pte)
		*orig_pte = data.pte;

	if (level)
		*level = data.level;
	return 0;
}

int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	return stage2_update_leaf_attrs(pgt, addr, size, 0,
					KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
					NULL, NULL,
					KVM_PGTABLE_WALK_IGNORE_EAGAIN);
}

void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				enum kvm_pgtable_walk_flags flags)
{
	int ret;

	ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
				       NULL, NULL, flags);
	if (!ret)
		dsb(ishst);
}

struct stage2_age_data {
	bool	mkold;
	bool	young;
};

static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
			     enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
	struct stage2_age_data *data = ctx->arg;

	if (!kvm_pte_valid(ctx->old) || new == ctx->old)
		return 0;

	data->young = true;

	/*
	 * stage2_age_walker() is always called while holding the MMU lock for
	 * write, so this will always succeed. Nonetheless, this deliberately
	 * follows the race detection pattern of the other stage-2 walkers in
	 * case the locking mechanics of the MMU notifiers is ever changed.
	 */
	if (data->mkold && !stage2_try_set_pte(ctx, new))
		return -EAGAIN;

	/*
	 * "But where's the TLBI?!", you scream.
	 * "Over in the core code", I sigh.
	 *
	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
	 */
	return 0;
}

bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold)
{
	struct stage2_age_data data = {
		.mkold		= mkold,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_age_walker,
		.arg		= &data,
		.flags		= KVM_PGTABLE_WALK_LEAF,
	};

	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
	return data.young;
}

int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot, enum kvm_pgtable_walk_flags flags)
{
	kvm_pte_t xn = 0, set = 0, clr = 0;
	s8 level;
	int ret;

	if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_R)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	ret = stage2_set_xn_attr(prot, &xn);
	if (ret)
		return ret;

	set |= xn & KVM_PTE_LEAF_ATTR_HI_S2_XN;
	clr |= ~xn & KVM_PTE_LEAF_ATTR_HI_S2_XN;

	ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level, flags);
	if (!ret || ret == -EAGAIN)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
	return ret;
}

static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
			       enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable *pgt = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;

	if (!stage2_pte_cacheable(pgt, ctx->old))
		return 0;

	if (mm_ops->dcache_clean_inval_poc)
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
					       kvm_granule_size(ctx->level));
	return 0;
}

int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_flush_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= pgt,
	};

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		return 0;

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, s8 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte)
{
	struct stage2_map_data map_data = {
		.phys		= phys,
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.force_pte	= force_pte,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_LEAF |
				  KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
				  KVM_PGTABLE_WALK_SKIP_CMO,
		.arg		= &map_data,
	};
	/*
	 * The input address (.addr) is irrelevant for walking an
	 * unlinked table. Construct an arbitrary IA range to map
	 * kvm_granule_size(level) worth of memory.
	 */
	struct kvm_pgtable_walk_data data = {
		.walker	= &walker,
		.addr	= 0,
		.end	= kvm_granule_size(level),
	};
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
	kvm_pte_t *pgtable;
	int ret;

	if (!IS_ALIGNED(phys, kvm_granule_size(level)))
		return ERR_PTR(-EINVAL);

	ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
	if (ret)
		return ERR_PTR(ret);

	pgtable = mm_ops->zalloc_page(mc);
	if (!pgtable)
		return ERR_PTR(-ENOMEM);

	ret = __kvm_pgtable_walk(&data, mm_ops, (kvm_pteref_t)pgtable,
				 level + 1);
	if (ret) {
		kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
		return ERR_PTR(ret);
	}

	return pgtable;
}

/*
 * Get the number of page-tables needed to replace a block with a
 * fully populated tree up to the PTE entries. Note that @level is
 * interpreted as in "level @level entry". For example, with a 4K
 * granule a level-1 block is replaced by one level-2 table plus
 * PTRS_PER_PTE level-3 tables, a level-2 block needs a single
 * level-3 table, and a level-3 entry is already a PTE.
 */
static int stage2_block_get_nr_page_tables(s8 level)
{
	switch (level) {
	case 1:
		return PTRS_PER_PTE + 1;
	case 2:
		return 1;
	case 3:
		return 0;
	default:
		WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL ||
			     level > KVM_PGTABLE_LAST_LEVEL);
		return -EINVAL;
	}
}

static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
			       enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	struct kvm_mmu_memory_cache *mc = ctx->arg;
	struct kvm_s2_mmu *mmu;
	kvm_pte_t pte = ctx->old, new, *childp;
	enum kvm_pgtable_prot prot;
	s8 level = ctx->level;
	bool force_pte;
	int nr_pages;
	u64 phys;

	/* No huge-pages exist at the last level */
	if (level == KVM_PGTABLE_LAST_LEVEL)
		return 0;

	/* We only split valid block mappings */
	if (!kvm_pte_valid(pte))
		return 0;

	nr_pages = stage2_block_get_nr_page_tables(level);
	if (nr_pages < 0)
		return nr_pages;

	if (mc->nobjs >= nr_pages) {
		/* Build a tree mapped down to the PTE granularity. */
		force_pte = true;
	} else {
		/*
		 * Don't force PTEs, so create_unlinked() below does
		 * not populate the tree up to the PTE level. The
		 * consequence is that the call will require a single
		 * page of level 2 entries at level 1, or a single
		 * page of PTEs at level 2. If we are at level 1, the
		 * PTEs will be created recursively.
		 */
		force_pte = false;
		nr_pages = 1;
	}

	if (mc->nobjs < nr_pages)
		return -ENOMEM;

	mmu = container_of(mc, struct kvm_s2_mmu, split_page_cache);
	phys = kvm_pte_to_phys(pte);
	prot = kvm_pgtable_stage2_pte_prot(pte);

	childp = kvm_pgtable_stage2_create_unlinked(mmu->pgt, phys,
						    level, prot, mc, force_pte);
	if (IS_ERR(childp))
		return PTR_ERR(childp);

	if (!stage2_try_break_pte(ctx, mmu)) {
		kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
		return -EAGAIN;
	}

	/*
	 * Note, the contents of the page table are guaranteed to be made
	 * visible before the new PTE is assigned because stage2_make_pte()
	 * writes the PTE using smp_store_release().
	 */
	new = kvm_init_table_pte(childp, mm_ops);
	stage2_make_pte(ctx, new);
	return 0;
}

int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_split_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= mc,
	};
	int ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	return ret;
}

int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb)
{
	size_t pgd_sz;
	u64 vtcr = mmu->vtcr;
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
	pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits		= ia_bits;
	pgt->start_level	= start_level;
	pgt->mm_ops		= mm_ops;
	pgt->mmu		= mmu;
	pgt->flags		= flags;
	pgt->force_pte_cb	= force_pte_cb;

	/* Ensure zeroed PGD pages are visible to the hardware walker */
	dsb(ishst);
	return 0;
}

size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
{
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
}

static int stage2_free_leaf(const struct kvm_pgtable_visit_ctx *ctx)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	mm_ops->put_page(ctx->ptep);
	return 0;
}

static int stage2_free_table_post(const struct kvm_pgtable_visit_ctx *ctx)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);

	if (mm_ops->page_count(childp) != 1)
		return 0;

	/*
	 * Drop references and clear the now stale PTE to avoid rewalking the
	 * freed page table.
	 */
	mm_ops->put_page(ctx->ptep);
	mm_ops->put_page(childp);
	kvm_clear_pte(ctx->ptep);
	return 0;
}

static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
			      enum kvm_pgtable_walk_flags visit)
{
	if (!stage2_pte_is_counted(ctx->old))
		return 0;

	switch (visit) {
	case KVM_PGTABLE_WALK_LEAF:
		return stage2_free_leaf(ctx);
	case KVM_PGTABLE_WALK_TABLE_POST:
		return stage2_free_table_post(ctx);
	default:
		return -EINVAL;
	}
}

void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
				      u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
	};

	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
}

void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
{
	size_t pgd_sz;

	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;

	/*
	 * Since the pgtable is unlinked at this point, and not shared with
	 * other walkers, safely dereference pgd with kvm_dereference_pteref_raw().
	 */
	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref_raw(pgt->pgd), pgd_sz);
	pgt->pgd = NULL;
}

void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
{
	kvm_pgtable_stage2_destroy_range(pgt, 0, BIT(pgt->ia_bits));
	kvm_pgtable_stage2_destroy_pgd(pgt);
}

void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
{
	kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
	};
	struct kvm_pgtable_walk_data data = {
		.walker	= &walker,

		/*
		 * At this point the IPA really doesn't matter, as the page
		 * table being traversed has already been removed from stage-2.
		 * Set an appropriate range to cover the entire page table.
		 */
		.addr	= 0,
		.end	= kvm_granule_size(level),
	};

	WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));

	WARN_ON(mm_ops->page_count(pgtable) != 1);
	mm_ops->put_page(pgtable);
}