// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
 * No bombay mix was harmed in the writing of this file.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

struct kvm_pgtable_walk_data {
	struct kvm_pgtable_walker	*walker;

	const u64			start;
	u64				addr;
	const u64			end;
};

static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx)
{
	return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
}

static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
{
	return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
}

static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
{
	u64 granule = kvm_granule_size(ctx->level);

	if (!kvm_level_supports_block_mapping(ctx->level))
		return false;

	if (granule > (ctx->end - ctx->addr))
		return false;

	if (!IS_ALIGNED(phys, granule))
		return false;

	return IS_ALIGNED(ctx->addr, granule);
}

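/*
 * Index of @data->addr within the page-table page at @level: each level
 * resolves PAGE_SHIFT - 3 bits of the input address.
 */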
static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, s8 level)
{
	u64 shift = kvm_granule_shift(level);
	u64 mask = BIT(PAGE_SHIFT - 3) - 1;

	return (data->addr >> shift) & mask;
}

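/*
 * Index of the concatenated PGD page that @addr falls in. Shifting by the
 * granule size of the level above the start level means each PGD page
 * covers one full start-level table's worth of input address space.
 */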
static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
	u64 shift = kvm_granule_shift(pgt->start_level - 1);	/* May underflow */
	u64 mask = BIT(pgt->ia_bits) - 1;

	return (addr & mask) >> shift;
}

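/*
 * Number of concatenated initial-level pages needed to cover @ia_bits of
 * input address space, i.e. the index of the last PGD page plus one.
 */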
static u32 kvm_pgd_pages(u32 ia_bits, s8 start_level)
{
	struct kvm_pgtable pgt = {
		.ia_bits	= ia_bits,
		.start_level	= start_level,
	};

	return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}

static bool kvm_pte_table(kvm_pte_t pte, s8 level)
{
	if (level == KVM_PGTABLE_LAST_LEVEL)
		return false;

	if (!kvm_pte_valid(pte))
		return false;

	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

static void kvm_clear_pte(kvm_pte_t *ptep)
{
	WRITE_ONCE(*ptep, 0);
}

static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
{
	kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));

	pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
	pte |= KVM_PTE_VALID;
	return pte;
}

static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, s8 level)
{
	kvm_pte_t pte = kvm_phys_to_pte(pa);
	u64 type = (level == KVM_PGTABLE_LAST_LEVEL) ? KVM_PTE_TYPE_PAGE :
						       KVM_PTE_TYPE_BLOCK;

	pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
	pte |= FIELD_PREP(KVM_PTE_TYPE, type);
	pte |= KVM_PTE_VALID;

	return pte;
}

static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
				  const struct kvm_pgtable_visit_ctx *ctx,
				  enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_walker *walker = data->walker;

	/* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
	WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
	return walker->cb(ctx, visit);
}

static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
				      int r)
{
	/*
	 * Visitor callbacks return EAGAIN when the conditions that led to a
	 * fault are no longer reflected in the page tables due to a race to
	 * update a PTE. In the context of a fault handler this is interpreted
	 * as a signal to retry guest execution.
	 *
	 * Ignore the return code altogether for walkers outside a fault handler
	 * (e.g. write protecting a range of memory) and chug along with the
	 * page table walk.
	 */
	if (r == -EAGAIN)
		return walker->flags & KVM_PGTABLE_WALK_IGNORE_EAGAIN;

	return !r;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level);

static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
				      struct kvm_pgtable_mm_ops *mm_ops,
				      kvm_pteref_t pteref, s8 level)
{
	enum kvm_pgtable_walk_flags flags = data->walker->flags;
	kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
	struct kvm_pgtable_visit_ctx ctx = {
		.ptep	= ptep,
		.old	= READ_ONCE(*ptep),
		.arg	= data->walker->arg,
		.mm_ops	= mm_ops,
		.start	= data->start,
		.addr	= data->addr,
		.end	= data->end,
		.level	= level,
		.flags	= flags,
	};
	int ret = 0;
	bool reload = false;
	kvm_pteref_t childp;
	bool table = kvm_pte_table(ctx.old, level);

	if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
		reload = true;
	}

	if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
		reload = true;
	}

	/*
	 * Reload the page table after invoking the walker callback for leaf
	 * entries or after pre-order traversal, to allow the walker to descend
	 * into a newly installed or replaced table.
	 */
	if (reload) {
		ctx.old = READ_ONCE(*ptep);
		table = kvm_pte_table(ctx.old, level);
	}

	if (!kvm_pgtable_walk_continue(data->walker, ret))
		goto out;

	if (!table) {
		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
		data->addr += kvm_granule_size(level);
		goto out;
	}

	childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
	ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
	if (!kvm_pgtable_walk_continue(data->walker, ret))
		goto out;

	if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);

out:
	if (kvm_pgtable_walk_continue(data->walker, ret))
		return 0;

	return ret;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level)
{
	u32 idx;
	int ret = 0;

	if (WARN_ON_ONCE(level < KVM_PGTABLE_FIRST_LEVEL ||
			 level > KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

	for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
		kvm_pteref_t pteref = &pgtable[idx];

		if (data->addr >= data->end)
			break;

		ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
		if (ret)
			break;
	}

	return ret;
}

static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
{
	u32 idx;
	int ret = 0;
	u64 limit = BIT(pgt->ia_bits);

	if (data->addr > limit || data->end > limit)
		return -ERANGE;

	if (!pgt->pgd)
		return -EINVAL;

	for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
		kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];

		ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
		if (ret)
			break;
	}

	return ret;
}

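/*
 * A walk is driven by a caller-provided struct kvm_pgtable_walker. As a
 * rough sketch (callback and argument names here are purely illustrative),
 * a typical user looks like:
 *
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= my_walker_cb,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *		.arg	= &my_data,
 *	};
 *
 *	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
 *
 * See leaf_walker()/kvm_pgtable_get_leaf() below for a concrete example.
 */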
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker)
{
	struct kvm_pgtable_walk_data walk_data = {
		.start	= ALIGN_DOWN(addr, PAGE_SIZE),
		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
		.end	= PAGE_ALIGN(walk_data.addr + size),
		.walker	= walker,
	};
	int r;

	r = kvm_pgtable_walk_begin(walker);
	if (r)
		return r;

	r = _kvm_pgtable_walk(pgt, &walk_data);
	kvm_pgtable_walk_end(walker);

	return r;
}

struct leaf_walk_data {
	kvm_pte_t	pte;
	s8		level;
};

static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
		       enum kvm_pgtable_walk_flags visit)
{
	struct leaf_walk_data *data = ctx->arg;

	data->pte   = ctx->old;
	data->level = ctx->level;

	return 0;
}

int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, s8 *level)
{
	struct leaf_walk_data data;
	struct kvm_pgtable_walker walker = {
		.cb	= leaf_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &data,
	};
	int ret;

	ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
			       PAGE_SIZE, &walker);
	if (!ret) {
		if (ptep)
			*ptep  = data.pte;
		if (level)
			*level = data.level;
	}

	return ret;
}

struct hyp_map_data {
	const u64			phys;
	kvm_pte_t			attr;
};

static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
	kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
	u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
					       KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;

	if (!(prot & KVM_PGTABLE_PROT_R))
		return -EINVAL;

	if (!cpus_have_final_cap(ARM64_KVM_HVHE))
		prot &= ~KVM_PGTABLE_PROT_UX;

	if (prot & KVM_PGTABLE_PROT_X) {
		if (prot & KVM_PGTABLE_PROT_W)
			return -EINVAL;

		if (device)
			return -EINVAL;

		if (system_supports_bti_kernel())
			attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
	}

	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
		if (!(prot & KVM_PGTABLE_PROT_PX))
			attr |= KVM_PTE_LEAF_ATTR_HI_S1_PXN;
		if (!(prot & KVM_PGTABLE_PROT_UX))
			attr |= KVM_PTE_LEAF_ATTR_HI_S1_UXN;
	} else {
		if (!(prot & KVM_PGTABLE_PROT_PX))
			attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
	}

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
	if (!kvm_lpa2_is_enabled())
		attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
	attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
	*ptep = attr;

	return 0;
}

enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
{
	enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
	u32 ap;

	if (!kvm_pte_valid(pte))
		return prot;

	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
		if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_PXN))
			prot |= KVM_PGTABLE_PROT_PX;
		if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_UXN))
			prot |= KVM_PGTABLE_PROT_UX;
	} else {
		if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
			prot |= KVM_PGTABLE_PROT_PX;
	}

	ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
	if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
		prot |= KVM_PGTABLE_PROT_R;
	else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
		prot |= KVM_PGTABLE_PROT_RW;

	return prot;
}

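/*
 * Try to install a valid leaf for the current walk position. Returns false
 * when a block/page mapping isn't possible here, in which case the caller
 * (hyp_map_walker()) installs a table and lets the walk descend instead.
 */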
static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
				    struct hyp_map_data *data)
{
	u64 phys = data->phys + (ctx->addr - ctx->start);
	kvm_pte_t new;

	if (!kvm_block_mapping_supported(ctx, phys))
		return false;

	new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
	if (ctx->old == new)
		return true;
	if (!kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);
	else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
		return false;

	smp_store_release(ctx->ptep, new);
	return true;
}

static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
			  enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t *childp, new;
	struct hyp_map_data *data = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (hyp_map_walker_try_leaf(ctx, data))
		return 0;

	if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

	childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!childp)
		return -ENOMEM;

	new = kvm_init_table_pte(childp, mm_ops);
	mm_ops->get_page(ctx->ptep);
	smp_store_release(ctx->ptep, new);

	return 0;
}

int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot)
{
	int ret;
	struct hyp_map_data map_data = {
		.phys	= ALIGN_DOWN(phys, PAGE_SIZE),
	};
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_map_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &map_data,
	};

	ret = hyp_set_prot_attr(prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	isb();
	return ret;
}

static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
			    enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t *childp = NULL;
	u64 granule = kvm_granule_size(ctx->level);
	u64 *unmapped = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))
		return -EINVAL;

	if (kvm_pte_table(ctx->old, ctx->level)) {
		childp = kvm_pte_follow(ctx->old, mm_ops);

		if (mm_ops->page_count(childp) != 1)
			return 0;

		kvm_clear_pte(ctx->ptep);
		dsb(ishst);
		__tlbi_level(vae2is, ctx->addr, TLBI_TTL_UNKNOWN);
	} else {
		if (ctx->end - ctx->addr < granule)
			return -EINVAL;

		kvm_clear_pte(ctx->ptep);
		dsb(ishst);
		__tlbi_level(vale2is, ctx->addr, ctx->level);
		*unmapped += granule;
	}

	__tlbi_sync_s1ish_hyp();
	isb();
	mm_ops->put_page(ctx->ptep);

	if (childp)
		mm_ops->put_page(childp);

	return 0;
}

u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	u64 unmapped = 0;
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_unmap_walker,
		.arg	= &unmapped,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	if (!pgt->mm_ops->page_count)
		return 0;

	kvm_pgtable_walk(pgt, addr, size, &walker);
	return unmapped;
}

int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops)
{
	s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 -
			 ARM64_HW_PGTABLE_LEVELS(va_bits);

	if (start_level < KVM_PGTABLE_FIRST_LEVEL ||
	    start_level > KVM_PGTABLE_LAST_LEVEL)
		return -EINVAL;

	pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits		= va_bits;
	pgt->start_level	= start_level;
	pgt->mm_ops		= mm_ops;
	pgt->mmu		= NULL;
	pgt->force_pte_cb	= NULL;

	return 0;
}

static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
			   enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	mm_ops->put_page(ctx->ptep);

	if (kvm_pte_table(ctx->old, ctx->level))
		mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));

	return 0;
}

void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
	pgt->pgd = NULL;
}

struct stage2_map_data {
	const u64			phys;
	kvm_pte_t			attr;
	kvm_pte_t			pte_annot;

	kvm_pte_t			*anchor;
	kvm_pte_t			*childp;

	struct kvm_s2_mmu		*mmu;
	void				*memcache;

	/* Force mappings to page granularity */
	bool				force_pte;

	/* Walk should update owner_id only */
	bool				annotation;
};

u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
{
	u64 vtcr = VTCR_EL2_FLAGS;
	s8 lvls;

	vtcr |= FIELD_PREP(VTCR_EL2_PS, kvm_get_parange(mmfr0));
	vtcr |= FIELD_PREP(VTCR_EL2_T0SZ, (UL(64) - phys_shift));
	/*
	 * Use a minimum 2 level page table to prevent splitting
	 * host PMD huge pages at stage2.
	 */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;

	/*
	 * When LPA2 is enabled, the HW supports an extra level of translation
	 * (for 5 in total) when using 4K pages. It also introduces VTCR_EL2.SL2
	 * as an addition to SL0 to allow this extra start level to be encoded.
	 * However, since we always use concatenated pages for the first level
	 * lookup, we will never need this extra level and therefore do not need
	 * to touch SL2.
	 */
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable the Hardware Access Flag management, unconditionally
	 * on all CPUs. In systems that have asymmetric support for the feature
	 * this allows KVM to leverage hardware support on the subset of cores
	 * that implement the feature.
	 *
	 * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
	 * hardware) on implementations that do not advertise support for the
	 * feature. As such, setting HA unconditionally is safe, unless you
	 * happen to be running on a design that has unadvertised support for
	 * HAFDBS. Here be dragons.
	 */
	if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		vtcr |= VTCR_EL2_HA;
#endif /* CONFIG_ARM64_HW_AFDBM */

	if (kvm_lpa2_is_enabled())
		vtcr |= VTCR_EL2_DS;

	/* Set the vmid bits */
	vtcr |= (get_vmid_bits(mmfr1) == 16) ? VTCR_EL2_VS : 0;

	return vtcr;
}

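/*
 * Invalidate the stage-2 TLB entries for [addr, addr + size). Without
 * FEAT_TLBIRANGE this degrades to a full VMID invalidation; otherwise the
 * range is flushed in chunks of at most MAX_TLBI_RANGE_PAGES pages.
 */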
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
			      phys_addr_t addr, size_t size)
{
	unsigned long pages, inval_pages;

	if (!system_supports_tlb_range()) {
		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
		return;
	}

	pages = size >> PAGE_SHIFT;
	while (pages > 0) {
		inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
		kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);

		addr += inval_pages << PAGE_SHIFT;
		pages -= inval_pages;
	}
}

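/*
 * Stage-2 memory attribute encoding for a leaf PTE. When the table has been
 * configured with KVM_PGTABLE_S2_AS_S1, the AS_S1 encoding is used regardless
 * of the attribute requested by the caller.
 */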
#define KVM_S2_MEMATTR(pgt, attr)					\
	({								\
		kvm_pte_t __attr;					\
									\
		if ((pgt)->flags & KVM_PGTABLE_S2_AS_S1)		\
			__attr = PAGE_S2_MEMATTR(AS_S1);		\
		else							\
			__attr = PAGE_S2_MEMATTR(attr);			\
									\
		__attr;							\
	})

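/*
 * Encode the (PX, UX) execute permissions into the stage-2 XN[1:0] field:
 * 0b00 executable at EL1 and EL0, 0b01 EL0 only, 0b10 neither, 0b11 EL1
 * only. Without FEAT_XNX the two cannot be encoded independently, so PX
 * and UX must match.
 */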
static int stage2_set_xn_attr(enum kvm_pgtable_prot prot, kvm_pte_t *attr)
{
	bool px, ux;
	u8 xn;

	px = prot & KVM_PGTABLE_PROT_PX;
	ux = prot & KVM_PGTABLE_PROT_UX;

	if (!cpus_have_final_cap(ARM64_HAS_XNX) && px != ux)
		return -EINVAL;

	if (px && ux)
		xn = 0b00;
	else if (!px && ux)
		xn = 0b01;
	else if (!px && !ux)
		xn = 0b10;
	else
		xn = 0b11;

	*attr &= ~KVM_PTE_LEAF_ATTR_HI_S2_XN;
	*attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, xn);
	return 0;
}

static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
				kvm_pte_t *ptep)
{
	kvm_pte_t attr;
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
	int r;

	switch (prot & (KVM_PGTABLE_PROT_DEVICE |
			KVM_PGTABLE_PROT_NORMAL_NC)) {
	case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC:
		return -EINVAL;
	case KVM_PGTABLE_PROT_DEVICE:
		if (prot & KVM_PGTABLE_PROT_X)
			return -EINVAL;
		attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
		break;
	case KVM_PGTABLE_PROT_NORMAL_NC:
		if (prot & KVM_PGTABLE_PROT_X)
			return -EINVAL;
		attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
		break;
	default:
		attr = KVM_S2_MEMATTR(pgt, NORMAL);
	}

	r = stage2_set_xn_attr(prot, &attr);
	if (r)
		return r;

	if (prot & KVM_PGTABLE_PROT_R)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	if (!kvm_lpa2_is_enabled())
		attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);

	attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
	attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
	*ptep = attr;

	return 0;
}

enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
{
	enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;

	if (!kvm_pte_valid(pte))
		return prot;

	if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
		prot |= KVM_PGTABLE_PROT_R;
	if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
		prot |= KVM_PGTABLE_PROT_W;

	switch (FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte)) {
	case 0b00:
		prot |= KVM_PGTABLE_PROT_PX | KVM_PGTABLE_PROT_UX;
		break;
	case 0b01:
		prot |= KVM_PGTABLE_PROT_UX;
		break;
	case 0b11:
		prot |= KVM_PGTABLE_PROT_PX;
		break;
	default:
		break;
	}

	return prot;
}

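/*
 * Returns false when both PTEs are valid and any difference is confined to
 * the access permission bits; such a change is handled by relaxing
 * permissions on the existing mapping rather than by replacing it.
 */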
static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
	if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
		return true;

	return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
}

static bool stage2_pte_is_counted(kvm_pte_t pte)
{
	/*
	 * The refcount tracks valid entries as well as invalid entries that
	 * encode ownership of a page by an entity other than the page-table
	 * owner, whose id is 0.
	 */
	return !!pte;
}

static bool stage2_pte_is_locked(kvm_pte_t pte)
{
	if (kvm_pte_valid(pte))
		return false;

	return FIELD_GET(KVM_INVALID_PTE_TYPE_MASK, pte) ==
	       KVM_INVALID_PTE_TYPE_LOCKED;
}

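/*
 * Update a PTE from within a walker. Exclusive (non-shared) walks can write
 * the new value directly; shared walks use cmpxchg() against the value the
 * walker observed, so a concurrent update makes the store fail.
 */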
static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
{
	if (!kvm_pgtable_walk_shared(ctx)) {
		WRITE_ONCE(*ctx->ptep, new);
		return true;
	}

	return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
}

/**
 * stage2_try_break_pte() - Invalidates a pte according to the
 *			    'break-before-make' requirements of the
 *			    architecture.
 *
 * @ctx: context of the visited pte.
 * @mmu: stage-2 mmu
 *
 * Returns: true if the pte was successfully broken.
 *
 * If the removed pte was valid, performs the necessary serialization and TLB
 * invalidation for the old value. For counted ptes, drops the reference count
 * on the containing table page.
 */
static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
				 struct kvm_s2_mmu *mmu)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t locked_pte;

	if (stage2_pte_is_locked(ctx->old)) {
		/*
		 * Should never occur if this walker has exclusive access to the
		 * page tables.
		 */
		WARN_ON(!kvm_pgtable_walk_shared(ctx));
		return false;
	}

	locked_pte = FIELD_PREP(KVM_INVALID_PTE_TYPE_MASK,
				KVM_INVALID_PTE_TYPE_LOCKED);
	if (!stage2_try_set_pte(ctx, locked_pte))
		return false;

	if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) {
		/*
		 * Perform the appropriate TLB invalidation based on the
		 * evicted pte value (if any).
		 */
		if (kvm_pte_table(ctx->old, ctx->level)) {
			u64 size = kvm_granule_size(ctx->level);
			u64 addr = ALIGN_DOWN(ctx->addr, size);

			kvm_tlb_flush_vmid_range(mmu, addr, size);
		} else if (kvm_pte_valid(ctx->old)) {
			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
				     ctx->addr, ctx->level);
		}
	}

	if (stage2_pte_is_counted(ctx->old))
		mm_ops->put_page(ctx->ptep);

	return true;
}

static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	WARN_ON(!stage2_pte_is_locked(*ctx->ptep));

	if (stage2_pte_is_counted(new))
		mm_ops->get_page(ctx->ptep);

	smp_store_release(ctx->ptep, new);
}

static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
{
	/*
	 * If FEAT_TLBIRANGE is implemented, defer the individual
	 * TLB invalidations until the entire walk is finished, and
	 * then use the range-based TLBI instructions to do the
	 * invalidations. Condition deferred TLB invalidation on the
	 * system supporting FWB as the optimization is entirely
	 * pointless when the unmap walker needs to perform CMOs.
	 */
	return system_supports_tlb_range() && cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
}

static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
				 struct kvm_s2_mmu *mmu,
				 struct kvm_pgtable_mm_ops *mm_ops)
{
	struct kvm_pgtable *pgt = ctx->arg;

	/*
	 * Clear the existing PTE, and perform break-before-make if it was
	 * valid. Depending on the system support, defer the TLB maintenance
	 * for the same until the entire unmap walk is completed.
	 */
	if (kvm_pte_valid(ctx->old)) {
		kvm_clear_pte(ctx->ptep);

		if (kvm_pte_table(ctx->old, ctx->level)) {
			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
				     TLBI_TTL_UNKNOWN);
		} else if (!stage2_unmap_defer_tlb_flush(pgt)) {
			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
				     ctx->level);
		}
	}

	mm_ops->put_page(ctx->ptep);
}

static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
{
	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;

	return kvm_pte_valid(pte) && memattr == KVM_S2_MEMATTR(pgt, NORMAL);
}

static bool stage2_pte_executable(kvm_pte_t pte)
{
	return kvm_pte_valid(pte) && !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}

static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
				       const struct stage2_map_data *data)
{
	u64 phys = data->phys;

	/* Work out the correct PA based on how far the walk has gotten */
	return phys + (ctx->addr - ctx->start);
}

static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
					struct stage2_map_data *data)
{
	u64 phys = stage2_map_walker_phys_addr(ctx, data);

	if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
		return false;

	if (data->annotation)
		return true;

	return kvm_block_mapping_supported(ctx, phys);
}

static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
				      struct stage2_map_data *data)
{
	kvm_pte_t new;
	u64 phys = stage2_map_walker_phys_addr(ctx, data);
	u64 granule = kvm_granule_size(ctx->level);
	struct kvm_pgtable *pgt = data->mmu->pgt;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!stage2_leaf_mapping_allowed(ctx, data))
		return -E2BIG;

	if (!data->annotation)
		new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
	else
		new = data->pte_annot;

	/*
	 * Skip updating the PTE if we are trying to recreate the exact
	 * same mapping or only change the access permissions. Instead,
	 * the vCPU will exit one more time from guest if still needed
	 * and then go through the path of relaxing permissions.
	 */
	if (!stage2_pte_needs_update(ctx->old, new))
		return -EAGAIN;

	/* If we're only changing software bits, then store them and go! */
	if (!kvm_pgtable_walk_shared(ctx) &&
	    !((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW)) {
		bool old_is_counted = stage2_pte_is_counted(ctx->old);

		if (old_is_counted != stage2_pte_is_counted(new)) {
			if (old_is_counted)
				mm_ops->put_page(ctx->ptep);
			else
				mm_ops->get_page(ctx->ptep);
		}
		WARN_ON_ONCE(!stage2_try_set_pte(ctx, new));
		return 0;
	}

	if (!stage2_try_break_pte(ctx, data->mmu))
		return -EAGAIN;

	/* Perform CMOs before installation of the guest stage-2 PTE */
	if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc &&
	    stage2_pte_cacheable(pgt, new))
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
					       granule);

	if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou &&
	    stage2_pte_executable(new))
		mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);

	stage2_make_pte(ctx, new);

	return 0;
}

static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
				     struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
	int ret;

	if (!stage2_leaf_mapping_allowed(ctx, data))
		return 0;

	ret = stage2_map_walker_try_leaf(ctx, data);
	if (ret)
		return ret;

	mm_ops->free_unlinked_table(childp, ctx->level);
	return 0;
}

static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
				struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp, new;
	int ret;

	ret = stage2_map_walker_try_leaf(ctx, data);
	if (ret != -E2BIG)
		return ret;

	if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

	if (!data->memcache)
		return -ENOMEM;

	childp = mm_ops->zalloc_page(data->memcache);
	if (!childp)
		return -ENOMEM;

	if (!stage2_try_break_pte(ctx, data->mmu)) {
		mm_ops->put_page(childp);
		return -EAGAIN;
	}

	/*
	 * If we've run into an existing block mapping then replace it with
	 * a table. Accesses beyond 'end' that fall within the new table
	 * will be mapped lazily.
	 */
	new = kvm_init_table_pte(childp, mm_ops);
	stage2_make_pte(ctx, new);

	return 0;
}

/*
 * The TABLE_PRE callback runs for table entries on the way down, looking
 * for table entries which we could conceivably replace with a block entry
 * for this mapping. If it finds one it replaces the entry and calls
 * kvm_pgtable_mm_ops::free_unlinked_table() to tear down the detached table.
 *
 * Otherwise, the LEAF callback performs the mapping at the existing leaves
 * instead.
 */
static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
			     enum kvm_pgtable_walk_flags visit)
{
	struct stage2_map_data *data = ctx->arg;

	switch (visit) {
	case KVM_PGTABLE_WALK_TABLE_PRE:
		return stage2_map_walk_table_pre(ctx, data);
	case KVM_PGTABLE_WALK_LEAF:
		return stage2_map_walk_leaf(ctx, data);
	default:
		return -EINVAL;
	}
}

int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= ALIGN_DOWN(phys, PAGE_SIZE),
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.force_pte	= pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= flags |
				  KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF,
		.arg		= &map_data,
	};

	if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
		return -EINVAL;

	ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	return ret;
}

int kvm_pgtable_stage2_annotate(struct kvm_pgtable *pgt, u64 addr, u64 size,
				void *mc, enum kvm_invalid_pte_type type,
				kvm_pte_t pte_annot)
{
	int ret;
	struct stage2_map_data map_data = {
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.force_pte	= true,
		.annotation	= true,
		.pte_annot	= pte_annot |
				  FIELD_PREP(KVM_INVALID_PTE_TYPE_MASK, type),
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF,
		.arg		= &map_data,
	};

	if (pte_annot & ~KVM_INVALID_PTE_ANNOT_MASK)
		return -EINVAL;

	if (!type || type == KVM_INVALID_PTE_TYPE_LOCKED)
		return -EINVAL;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	return ret;
}

static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
			       enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable *pgt = ctx->arg;
	struct kvm_s2_mmu *mmu = pgt->mmu;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp = NULL;
	bool need_flush = false;

	if (!kvm_pte_valid(ctx->old)) {
		if (stage2_pte_is_counted(ctx->old)) {
			kvm_clear_pte(ctx->ptep);
			mm_ops->put_page(ctx->ptep);
		}
		return 0;
	}

	if (kvm_pte_table(ctx->old, ctx->level)) {
		childp = kvm_pte_follow(ctx->old, mm_ops);

		if (mm_ops->page_count(childp) != 1)
			return 0;
	} else if (stage2_pte_cacheable(pgt, ctx->old)) {
		need_flush = !cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
	}

	/*
	 * This is similar to the map() path in that we unmap the entire
	 * block entry and rely on the remaining portions being faulted
	 * back lazily.
	 */
	stage2_unmap_put_pte(ctx, mmu, mm_ops);

	if (need_flush && mm_ops->dcache_clean_inval_poc)
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
					       kvm_granule_size(ctx->level));

	if (childp)
		mm_ops->put_page(childp);

	return 0;
}

int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	int ret;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_unmap_walker,
		.arg	= pgt,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	if (stage2_unmap_defer_tlb_flush(pgt))
		/* Perform the deferred TLB invalidations */
		kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);

	return ret;
}

struct stage2_attr_data {
	kvm_pte_t			attr_set;
	kvm_pte_t			attr_clr;
	kvm_pte_t			pte;
	s8				level;
};

static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
			      enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t pte = ctx->old;
	struct stage2_attr_data *data = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))
		return -EAGAIN;

	data->level = ctx->level;
	data->pte = pte;
	pte &= ~data->attr_clr;
	pte |= data->attr_set;

	/*
	 * We may race with the CPU trying to set the access flag here,
	 * but worst-case the access flag update gets lost and will be
	 * set on the next access instead.
	 */
	if (data->pte != pte) {
		/*
		 * Invalidate instruction cache before updating the guest
		 * stage-2 PTE if we are going to add executable permission.
		 */
		if (mm_ops->icache_inval_pou &&
		    stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
			mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
						 kvm_granule_size(ctx->level));

		if (!stage2_try_set_pte(ctx, pte))
			return -EAGAIN;
	}

	return 0;
}

static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
				    u64 size, kvm_pte_t attr_set,
				    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
				    s8 *level, enum kvm_pgtable_walk_flags flags)
{
	int ret;
	kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
	struct stage2_attr_data data = {
		.attr_set	= attr_set & attr_mask,
		.attr_clr	= attr_clr & attr_mask,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_attr_walker,
		.arg		= &data,
		.flags		= flags | KVM_PGTABLE_WALK_LEAF,
	};

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	if (ret)
		return ret;

	if (orig_pte)
		*orig_pte = data.pte;

	if (level)
		*level = data.level;
	return 0;
}

int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	return stage2_update_leaf_attrs(pgt, addr, size, 0,
					KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
					NULL, NULL,
					KVM_PGTABLE_WALK_IGNORE_EAGAIN);
}

void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				enum kvm_pgtable_walk_flags flags)
{
	int ret;

	ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
				       NULL, NULL, flags);
	if (!ret)
		dsb(ishst);
}

struct stage2_age_data {
	bool	mkold;
	bool	young;
};

static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
			     enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
	struct stage2_age_data *data = ctx->arg;

	if (!kvm_pte_valid(ctx->old) || new == ctx->old)
		return 0;

	data->young = true;

	/*
	 * stage2_age_walker() is always called while holding the MMU lock for
	 * write, so this will always succeed. Nonetheless, this deliberately
	 * follows the race detection pattern of the other stage-2 walkers in
	 * case the locking mechanics of the MMU notifiers is ever changed.
	 */
	if (data->mkold && !stage2_try_set_pte(ctx, new))
		return -EAGAIN;

	/*
	 * "But where's the TLBI?!", you scream.
	 * "Over in the core code", I sigh.
	 *
	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
	 */
	return 0;
}

bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold)
{
	struct stage2_age_data data = {
		.mkold		= mkold,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_age_walker,
		.arg		= &data,
		.flags		= KVM_PGTABLE_WALK_LEAF,
	};

	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
	return data.young;
}

int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot, enum kvm_pgtable_walk_flags flags)
{
	kvm_pte_t xn = 0, set = 0, clr = 0;
	s8 level;
	int ret;

	if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_R)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	ret = stage2_set_xn_attr(prot, &xn);
	if (ret)
		return ret;

	set |= xn & KVM_PTE_LEAF_ATTR_HI_S2_XN;
	clr |= ~xn & KVM_PTE_LEAF_ATTR_HI_S2_XN;

	ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level, flags);
	if (!ret || ret == -EAGAIN)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
	return ret;
}

static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
			       enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable *pgt = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;

	if (!stage2_pte_cacheable(pgt, ctx->old))
		return 0;

	if (mm_ops->dcache_clean_inval_poc)
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
					       kvm_granule_size(ctx->level));
	return 0;
}

int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_flush_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= pgt,
	};

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		return 0;

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, s8 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte)
{
	struct stage2_map_data map_data = {
		.phys		= phys,
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.force_pte	= force_pte,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_LEAF |
				  KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
				  KVM_PGTABLE_WALK_SKIP_CMO,
		.arg		= &map_data,
	};
	/*
	 * The input address (.addr) is irrelevant for walking an
	 * unlinked table. Construct an ambiguous IA range to map
	 * kvm_granule_size(level) worth of memory.
	 */
	struct kvm_pgtable_walk_data data = {
		.walker	= &walker,
		.addr	= 0,
		.end	= kvm_granule_size(level),
	};
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
	kvm_pte_t *pgtable;
	int ret;

	if (!IS_ALIGNED(phys, kvm_granule_size(level)))
		return ERR_PTR(-EINVAL);

	ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
	if (ret)
		return ERR_PTR(ret);

	pgtable = mm_ops->zalloc_page(mc);
	if (!pgtable)
		return ERR_PTR(-ENOMEM);

	ret = __kvm_pgtable_walk(&data, mm_ops, (kvm_pteref_t)pgtable,
				 level + 1);
	if (ret) {
		kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
		return ERR_PTR(ret);
	}

	return pgtable;
}

/*
 * Get the number of page-tables needed to replace a block with a
 * fully populated tree up to the PTE entries. Note that @level is
 * interpreted as in "level @level entry".
 */
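/*
 * For example, with 4K pages, splitting a level-1 block down to PTEs takes
 * one level-2 table plus PTRS_PER_PTE level-3 tables (513 pages in total),
 * while a level-2 block only needs a single page of PTEs.
 */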
static int stage2_block_get_nr_page_tables(s8 level)
{
	switch (level) {
	case 1:
		return PTRS_PER_PTE + 1;
	case 2:
		return 1;
	case 3:
		return 0;
	default:
		WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL ||
			     level > KVM_PGTABLE_LAST_LEVEL);
		return -EINVAL;
	};
}

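/*
 * Note that the memory cache passed in as the walker argument must be the
 * stage-2 MMU's split_page_cache: the owning kvm_s2_mmu is recovered from
 * it via container_of() below.
 */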
static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
			       enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	struct kvm_mmu_memory_cache *mc = ctx->arg;
	struct kvm_s2_mmu *mmu;
	kvm_pte_t pte = ctx->old, new, *childp;
	enum kvm_pgtable_prot prot;
	s8 level = ctx->level;
	bool force_pte;
	int nr_pages;
	u64 phys;

	/* No huge-pages exist at the last level */
	if (level == KVM_PGTABLE_LAST_LEVEL)
		return 0;

	/* We only split valid block mappings */
	if (!kvm_pte_valid(pte))
		return 0;

	nr_pages = stage2_block_get_nr_page_tables(level);
	if (nr_pages < 0)
		return nr_pages;

	if (mc->nobjs >= nr_pages) {
		/* Build a tree mapped down to the PTE granularity. */
		force_pte = true;
	} else {
		/*
		 * Don't force PTEs, so create_unlinked() below does
		 * not populate the tree up to the PTE level. The
		 * consequence is that the call will require a single
		 * page of level 2 entries at level 1, or a single
		 * page of PTEs at level 2. If we are at level 1, the
		 * PTEs will be created recursively.
		 */
		force_pte = false;
		nr_pages = 1;
	}

	if (mc->nobjs < nr_pages)
		return -ENOMEM;

	mmu = container_of(mc, struct kvm_s2_mmu, split_page_cache);
	phys = kvm_pte_to_phys(pte);
	prot = kvm_pgtable_stage2_pte_prot(pte);

	childp = kvm_pgtable_stage2_create_unlinked(mmu->pgt, phys,
						    level, prot, mc, force_pte);
	if (IS_ERR(childp))
		return PTR_ERR(childp);

	if (!stage2_try_break_pte(ctx, mmu)) {
		kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
		return -EAGAIN;
	}

	/*
	 * Note, the contents of the page table are guaranteed to be made
	 * visible before the new PTE is assigned because stage2_make_pte()
	 * writes the PTE using smp_store_release().
	 */
	new = kvm_init_table_pte(childp, mm_ops);
	stage2_make_pte(ctx, new);
	return 0;
}

int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_split_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= mc,
	};
	int ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	return ret;
}

int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb)
{
	size_t pgd_sz;
	u64 vtcr = mmu->vtcr;
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
	pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits		= ia_bits;
	pgt->start_level	= start_level;
	pgt->mm_ops		= mm_ops;
	pgt->mmu		= mmu;
	pgt->flags		= flags;
	pgt->force_pte_cb	= force_pte_cb;

	/* Ensure zeroed PGD pages are visible to the hardware walker */
	dsb(ishst);
	return 0;
}

size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
{
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
}

static int stage2_free_leaf(const struct kvm_pgtable_visit_ctx *ctx)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	mm_ops->put_page(ctx->ptep);
	return 0;
}

static int stage2_free_table_post(const struct kvm_pgtable_visit_ctx *ctx)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);

	if (mm_ops->page_count(childp) != 1)
		return 0;

	/*
	 * Drop references and clear the now stale PTE to avoid rewalking the
	 * freed page table.
	 */
	mm_ops->put_page(ctx->ptep);
	mm_ops->put_page(childp);
	kvm_clear_pte(ctx->ptep);
	return 0;
}

static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
			      enum kvm_pgtable_walk_flags visit)
{
	if (!stage2_pte_is_counted(ctx->old))
		return 0;

	switch (visit) {
	case KVM_PGTABLE_WALK_LEAF:
		return stage2_free_leaf(ctx);
	case KVM_PGTABLE_WALK_TABLE_POST:
		return stage2_free_table_post(ctx);
	default:
		return -EINVAL;
	}
}

void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
				      u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
	};

	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
}

void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)
{
	size_t pgd_sz;

	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;

	/*
	 * Since the pgtable is unlinked at this point, and not shared with
	 * other walkers, safely dereference pgd with kvm_dereference_pteref_raw()
	 */
	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref_raw(pgt->pgd), pgd_sz);
	pgt->pgd = NULL;
}

void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
{
	kvm_pgtable_stage2_destroy_range(pgt, 0, BIT(pgt->ia_bits));
	kvm_pgtable_stage2_destroy_pgd(pgt);
}

void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
{
	kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
	};
	struct kvm_pgtable_walk_data data = {
		.walker	= &walker,

		/*
		 * At this point the IPA really doesn't matter, as the page
		 * table being traversed has already been removed from the stage
		 * 2. Set an appropriate range to cover the entire page table.
		 */
		.addr	= 0,
		.end	= kvm_granule_size(level),
	};

	WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));

	WARN_ON(mm_ops->page_count(pgtable) != 1);
	mm_ops->put_page(pgtable);
}