Lines Matching +full:data +full:- +full:addr

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
18 u64 addr; member
24 return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI); in kvm_pgtable_walk_skip_bbm_tlbi()
29 return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO); in kvm_pgtable_walk_skip_cmo()
34 u64 granule = kvm_granule_size(ctx->level); in kvm_block_mapping_supported()
36 if (!kvm_level_supports_block_mapping(ctx->level)) in kvm_block_mapping_supported()
39 if (granule > (ctx->end - ctx->addr)) in kvm_block_mapping_supported()
45 return IS_ALIGNED(ctx->addr, granule); in kvm_block_mapping_supported()
48 static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, s8 level) in kvm_pgtable_idx() argument
51 u64 mask = BIT(PAGE_SHIFT - 3) - 1; in kvm_pgtable_idx()
53 return (data->addr >> shift) & mask; in kvm_pgtable_idx()
56 static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) in kvm_pgd_page_idx() argument
58 u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */ in kvm_pgd_page_idx()
59 u64 mask = BIT(pgt->ia_bits) - 1; in kvm_pgd_page_idx()
61 return (addr & mask) >> shift; in kvm_pgd_page_idx()
71 return kvm_pgd_page_idx(&pgt, -1ULL) + 1; in kvm_pgd_pages()
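The two helpers above are plain shift-and-mask arithmetic: the PGD page index of an address is (addr & (BIT(ia_bits) - 1)) >> kvm_granule_shift(start_level - 1), and the number of concatenated PGD pages follows from indexing the highest address. A minimal userspace sketch of the same computation, assuming a 4K granule and purely illustrative parameters (40 IA bits, start level 1); granule_shift() here is a stand-in for the kernel's kvm_granule_shift():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define LAST_LEVEL	3	/* 4K granule: levels -1..3, leaves at level 3 */

/* Stand-in for kvm_granule_shift(): address bits covered below 'level'. */
static unsigned int granule_shift(int level)
{
	return (LAST_LEVEL - level) * (PAGE_SHIFT - 3) + PAGE_SHIFT;
}

/* Mirrors kvm_pgd_page_idx(): which concatenated PGD page holds 'addr'. */
static uint64_t pgd_page_idx(uint64_t addr, unsigned int ia_bits, int start_level)
{
	unsigned int shift = granule_shift(start_level - 1);
	uint64_t mask = (1ULL << ia_bits) - 1;

	return (addr & mask) >> shift;
}

int main(void)
{
	/* A 40-bit IPA space rooted at level 1 needs two concatenated PGD pages. */
	printf("pgd pages = %llu\n",
	       (unsigned long long)(pgd_page_idx(~0ULL, 40, 1) + 1));
	return 0;
}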
87 return mm_ops->phys_to_virt(kvm_pte_to_phys(pte)); in kvm_pte_follow()
97 kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp)); in kvm_init_table_pte()
122 static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, in kvm_pgtable_visitor_cb() argument
126 struct kvm_pgtable_walker *walker = data->walker; in kvm_pgtable_visitor_cb()
128 /* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */ in kvm_pgtable_visitor_cb()
130 return walker->cb(ctx, visit); in kvm_pgtable_visitor_cb()
146 if (r == -EAGAIN) in kvm_pgtable_walk_continue()
147 return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT); in kvm_pgtable_walk_continue()
152 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
155 static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data, in __kvm_pgtable_visit() argument
159 enum kvm_pgtable_walk_flags flags = data->walker->flags; in __kvm_pgtable_visit()
160 kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref); in __kvm_pgtable_visit()
164 .arg = data->walker->arg, in __kvm_pgtable_visit()
166 .start = data->start, in __kvm_pgtable_visit()
167 .addr = data->addr, in __kvm_pgtable_visit()
168 .end = data->end, in __kvm_pgtable_visit()
178 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE); in __kvm_pgtable_visit()
183 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF); in __kvm_pgtable_visit()
189 * entries or after pre-order traversal, to allow the walker to descend in __kvm_pgtable_visit()
197 if (!kvm_pgtable_walk_continue(data->walker, ret)) in __kvm_pgtable_visit()
201 data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level)); in __kvm_pgtable_visit()
202 data->addr += kvm_granule_size(level); in __kvm_pgtable_visit()
207 ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1); in __kvm_pgtable_visit()
208 if (!kvm_pgtable_walk_continue(data->walker, ret)) in __kvm_pgtable_visit()
212 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST); in __kvm_pgtable_visit()
215 if (kvm_pgtable_walk_continue(data->walker, ret)) in __kvm_pgtable_visit()
221 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data, in __kvm_pgtable_walk() argument
229 return -EINVAL; in __kvm_pgtable_walk()
231 for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) { in __kvm_pgtable_walk()
234 if (data->addr >= data->end) in __kvm_pgtable_walk()
237 ret = __kvm_pgtable_visit(data, mm_ops, pteref, level); in __kvm_pgtable_walk()
245 static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data) in _kvm_pgtable_walk() argument
249 u64 limit = BIT(pgt->ia_bits); in _kvm_pgtable_walk()
251 if (data->addr > limit || data->end > limit) in _kvm_pgtable_walk()
252 return -ERANGE; in _kvm_pgtable_walk()
254 if (!pgt->pgd) in _kvm_pgtable_walk()
255 return -EINVAL; in _kvm_pgtable_walk()
257 for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) { in _kvm_pgtable_walk()
258 kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE]; in _kvm_pgtable_walk()
260 ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level); in _kvm_pgtable_walk()
268 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, in kvm_pgtable_walk() argument
272 .start = ALIGN_DOWN(addr, PAGE_SIZE), in kvm_pgtable_walk()
273 .addr = ALIGN_DOWN(addr, PAGE_SIZE), in kvm_pgtable_walk()
274 .end = PAGE_ALIGN(walk_data.addr + size), in kvm_pgtable_walk()
297 struct leaf_walk_data *data = ctx->arg; in leaf_walker() local
299 data->pte = ctx->old; in leaf_walker()
300 data->level = ctx->level; in leaf_walker()
305 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr, in kvm_pgtable_get_leaf() argument
308 struct leaf_walk_data data; in kvm_pgtable_get_leaf() local
312 .arg = &data, in kvm_pgtable_get_leaf()
316 ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE), in kvm_pgtable_get_leaf()
320 *ptep = data.pte; in kvm_pgtable_get_leaf()
322 *level = data.level; in kvm_pgtable_get_leaf()
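kvm_pgtable_get_leaf() packages the leaf walker into a one-shot query: walk a single page of address space and report the PTE and level found there. A hedged usage sketch (kernel context; example_query_ipa() is a hypothetical caller and error handling is trimmed):

/* Hypothetical caller: report what a guest IPA currently maps to. */
static int example_query_ipa(struct kvm_pgtable *pgt, u64 ipa)
{
	kvm_pte_t pte;
	s8 level;
	int ret;

	ret = kvm_pgtable_get_leaf(pgt, ipa, &pte, &level);
	if (ret)
		return ret;

	if (kvm_pte_valid(pte))
		pr_info("IPA %#llx -> PA %#llx at level %d\n",
			ipa, kvm_pte_to_phys(pte), level);
	return 0;
}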
343 return -EINVAL; in hyp_set_prot_attr()
347 return -EINVAL; in hyp_set_prot_attr()
350 return -EINVAL; in hyp_set_prot_attr()
389 struct hyp_map_data *data) in hyp_map_walker_try_leaf() argument
391 u64 phys = data->phys + (ctx->addr - ctx->start); in hyp_map_walker_try_leaf()
397 new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level); in hyp_map_walker_try_leaf()
398 if (ctx->old == new) in hyp_map_walker_try_leaf()
400 if (!kvm_pte_valid(ctx->old)) in hyp_map_walker_try_leaf()
401 ctx->mm_ops->get_page(ctx->ptep); in hyp_map_walker_try_leaf()
402 else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW)) in hyp_map_walker_try_leaf()
405 smp_store_release(ctx->ptep, new); in hyp_map_walker_try_leaf()
413 struct hyp_map_data *data = ctx->arg; in hyp_map_walker() local
414 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in hyp_map_walker()
416 if (hyp_map_walker_try_leaf(ctx, data)) in hyp_map_walker()
419 if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL)) in hyp_map_walker()
420 return -EINVAL; in hyp_map_walker()
422 childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL); in hyp_map_walker()
424 return -ENOMEM; in hyp_map_walker()
427 mm_ops->get_page(ctx->ptep); in hyp_map_walker()
428 smp_store_release(ctx->ptep, new); in hyp_map_walker()
433 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, in kvm_pgtable_hyp_map() argument
450 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_hyp_map()
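A typical consumer of kvm_pgtable_hyp_map() is the hyp VA mapping code, which installs a physical range into the hyp stage-1 table at page or block granularity. A hedged sketch; example_hyp_map() is hypothetical and the prot value is simply readable plus writable:

/* Hypothetical wrapper: map 'size' bytes of physical memory at hyp VA 'va'. */
static int example_hyp_map(struct kvm_pgtable *hyp_pgt, u64 va, u64 phys, u64 size)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;

	return kvm_pgtable_hyp_map(hyp_pgt, va, size, phys, prot);
}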
460 u64 granule = kvm_granule_size(ctx->level); in hyp_unmap_walker()
461 u64 *unmapped = ctx->arg; in hyp_unmap_walker()
462 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in hyp_unmap_walker()
464 if (!kvm_pte_valid(ctx->old)) in hyp_unmap_walker()
465 return -EINVAL; in hyp_unmap_walker()
467 if (kvm_pte_table(ctx->old, ctx->level)) { in hyp_unmap_walker()
468 childp = kvm_pte_follow(ctx->old, mm_ops); in hyp_unmap_walker()
470 if (mm_ops->page_count(childp) != 1) in hyp_unmap_walker()
473 kvm_clear_pte(ctx->ptep); in hyp_unmap_walker()
475 __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN); in hyp_unmap_walker()
477 if (ctx->end - ctx->addr < granule) in hyp_unmap_walker()
478 return -EINVAL; in hyp_unmap_walker()
480 kvm_clear_pte(ctx->ptep); in hyp_unmap_walker()
482 __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level); in hyp_unmap_walker()
488 mm_ops->put_page(ctx->ptep); in hyp_unmap_walker()
491 mm_ops->put_page(childp); in hyp_unmap_walker()
496 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) in kvm_pgtable_hyp_unmap() argument
505 if (!pgt->mm_ops->page_count) in kvm_pgtable_hyp_unmap()
508 kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_hyp_unmap()
515 s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 - in kvm_pgtable_hyp_init()
520 return -EINVAL; in kvm_pgtable_hyp_init()
522 pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL); in kvm_pgtable_hyp_init()
523 if (!pgt->pgd) in kvm_pgtable_hyp_init()
524 return -ENOMEM; in kvm_pgtable_hyp_init()
526 pgt->ia_bits = va_bits; in kvm_pgtable_hyp_init()
527 pgt->start_level = start_level; in kvm_pgtable_hyp_init()
528 pgt->mm_ops = mm_ops; in kvm_pgtable_hyp_init()
529 pgt->mmu = NULL; in kvm_pgtable_hyp_init()
530 pgt->force_pte_cb = NULL; in kvm_pgtable_hyp_init()
538 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in hyp_free_walker()
540 if (!kvm_pte_valid(ctx->old)) in hyp_free_walker()
543 mm_ops->put_page(ctx->ptep); in hyp_free_walker()
545 if (kvm_pte_table(ctx->old, ctx->level)) in hyp_free_walker()
546 mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops)); in hyp_free_walker()
558 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); in kvm_pgtable_hyp_destroy()
559 pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd)); in kvm_pgtable_hyp_destroy()
560 pgt->pgd = NULL; in kvm_pgtable_hyp_destroy()
639 return !(pgt->flags & KVM_PGTABLE_S2_NOFWB); in stage2_has_fwb()
643 phys_addr_t addr, size_t size) in kvm_tlb_flush_vmid_range() argument
655 kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages); in kvm_tlb_flush_vmid_range()
657 addr += inval_pages << PAGE_SHIFT; in kvm_tlb_flush_vmid_range()
658 pages -= inval_pages; in kvm_tlb_flush_vmid_range()
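kvm_tlb_flush_vmid_range() walks the range in bounded batches so a large unmap never issues one unbounded range invalidation. A standalone sketch of the same chunking loop; the batch cap and the printf are stand-ins for the kernel's limit and the hypercall:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define MAX_BATCH_PAGES	512	/* illustrative cap, not the kernel's constant */

static void flush_range(uint64_t addr, uint64_t size)
{
	uint64_t pages = size >> PAGE_SHIFT;

	while (pages > 0) {
		uint64_t batch = pages < MAX_BATCH_PAGES ? pages : MAX_BATCH_PAGES;

		/* Stand-in for kvm_call_hyp(__kvm_tlb_flush_vmid_range, ...). */
		printf("invalidate %llu pages at %#llx\n",
		       (unsigned long long)batch, (unsigned long long)addr);

		addr += batch << PAGE_SHIFT;
		pages -= batch;
	}
}

int main(void)
{
	/* Three full batches plus one trailing page -> four invalidations. */
	flush_range(0x40000000ULL, (3 * 512 + 1) * 4096ULL);
	return 0;
}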
673 return -EINVAL; in stage2_set_prot_attr()
676 return -EINVAL; in stage2_set_prot_attr()
681 return -EINVAL; in stage2_set_prot_attr()
736 * encode ownership of a page to another entity than the page-table in stage2_pte_is_counted()
750 WRITE_ONCE(*ctx->ptep, new); in stage2_try_set_pte()
754 return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old; in stage2_try_set_pte()
758 * stage2_try_break_pte() - Invalidates a pte according to the
759 * 'break-before-make' requirements of the
763 * @mmu: stage-2 mmu
774 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in stage2_try_break_pte()
776 if (stage2_pte_is_locked(ctx->old)) { in stage2_try_break_pte()
793 if (kvm_pte_table(ctx->old, ctx->level)) { in stage2_try_break_pte()
794 u64 size = kvm_granule_size(ctx->level); in stage2_try_break_pte()
795 u64 addr = ALIGN_DOWN(ctx->addr, size); in stage2_try_break_pte() local
797 kvm_tlb_flush_vmid_range(mmu, addr, size); in stage2_try_break_pte()
798 } else if (kvm_pte_valid(ctx->old)) { in stage2_try_break_pte()
800 ctx->addr, ctx->level); in stage2_try_break_pte()
804 if (stage2_pte_is_counted(ctx->old)) in stage2_try_break_pte()
805 mm_ops->put_page(ctx->ptep); in stage2_try_break_pte()
812 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in stage2_make_pte()
814 WARN_ON(!stage2_pte_is_locked(*ctx->ptep)); in stage2_make_pte()
817 mm_ops->get_page(ctx->ptep); in stage2_make_pte()
819 smp_store_release(ctx->ptep, new); in stage2_make_pte()
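Together these two helpers implement break-before-make on a live stage-2 entry: stage2_try_break_pte() locks the PTE (or detects a racing walker), invalidates stale TLB entries and drops the refcount, and only then does stage2_make_pte() publish the replacement with release semantics. A sketch of how the map walker composes them; example_bbm_replace() is a hypothetical illustration, the real sequence lives in stage2_map_walker_try_leaf() below, with CMOs and error paths omitted:

/* Hypothetical composition of the break-before-make helpers above. */
static int example_bbm_replace(const struct kvm_pgtable_visit_ctx *ctx,
			       struct kvm_s2_mmu *mmu, kvm_pte_t new)
{
	/* Break: lock the old PTE, flush the TLB, drop its refcount. */
	if (!stage2_try_break_pte(ctx, mmu))
		return -EAGAIN;		/* lost a race; let the walker retry */

	/* Make: install the new entry so other walkers see it fully formed. */
	stage2_make_pte(ctx, new);
	return 0;
}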
827 * then use the range-based TLBI instructions to do the in stage2_unmap_defer_tlb_flush()
839 struct kvm_pgtable *pgt = ctx->arg; in stage2_unmap_put_pte()
842 * Clear the existing PTE, and perform break-before-make if it was in stage2_unmap_put_pte()
846 if (kvm_pte_valid(ctx->old)) { in stage2_unmap_put_pte()
847 kvm_clear_pte(ctx->ptep); in stage2_unmap_put_pte()
849 if (kvm_pte_table(ctx->old, ctx->level)) { in stage2_unmap_put_pte()
850 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, in stage2_unmap_put_pte()
853 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, in stage2_unmap_put_pte()
854 ctx->level); in stage2_unmap_put_pte()
858 mm_ops->put_page(ctx->ptep); in stage2_unmap_put_pte()
873 const struct stage2_map_data *data) in stage2_map_walker_phys_addr() argument
875 u64 phys = data->phys; in stage2_map_walker_phys_addr()
878 return phys + (ctx->addr - ctx->start); in stage2_map_walker_phys_addr()
882 struct stage2_map_data *data) in stage2_leaf_mapping_allowed() argument
884 u64 phys = stage2_map_walker_phys_addr(ctx, data); in stage2_leaf_mapping_allowed()
886 if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL) in stage2_leaf_mapping_allowed()
889 if (data->annotation) in stage2_leaf_mapping_allowed()
896 struct stage2_map_data *data) in stage2_map_walker_try_leaf() argument
899 u64 phys = stage2_map_walker_phys_addr(ctx, data); in stage2_map_walker_try_leaf()
900 u64 granule = kvm_granule_size(ctx->level); in stage2_map_walker_try_leaf()
901 struct kvm_pgtable *pgt = data->mmu->pgt; in stage2_map_walker_try_leaf()
902 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in stage2_map_walker_try_leaf()
904 if (!stage2_leaf_mapping_allowed(ctx, data)) in stage2_map_walker_try_leaf()
905 return -E2BIG; in stage2_map_walker_try_leaf()
907 if (!data->annotation) in stage2_map_walker_try_leaf()
908 new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level); in stage2_map_walker_try_leaf()
910 new = kvm_init_invalid_leaf_owner(data->owner_id); in stage2_map_walker_try_leaf()
918 if (!stage2_pte_needs_update(ctx->old, new)) in stage2_map_walker_try_leaf()
919 return -EAGAIN; in stage2_map_walker_try_leaf()
923 !((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW)) { in stage2_map_walker_try_leaf()
924 bool old_is_counted = stage2_pte_is_counted(ctx->old); in stage2_map_walker_try_leaf()
928 mm_ops->put_page(ctx->ptep); in stage2_map_walker_try_leaf()
930 mm_ops->get_page(ctx->ptep); in stage2_map_walker_try_leaf()
936 if (!stage2_try_break_pte(ctx, data->mmu)) in stage2_map_walker_try_leaf()
937 return -EAGAIN; in stage2_map_walker_try_leaf()
939 /* Perform CMOs before installation of the guest stage-2 PTE */ in stage2_map_walker_try_leaf()
940 if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc && in stage2_map_walker_try_leaf()
942 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops), in stage2_map_walker_try_leaf()
945 if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou && in stage2_map_walker_try_leaf()
947 mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule); in stage2_map_walker_try_leaf()
955 struct stage2_map_data *data) in stage2_map_walk_table_pre() argument
957 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in stage2_map_walk_table_pre()
958 kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops); in stage2_map_walk_table_pre()
961 if (!stage2_leaf_mapping_allowed(ctx, data)) in stage2_map_walk_table_pre()
964 ret = stage2_map_walker_try_leaf(ctx, data); in stage2_map_walk_table_pre()
968 mm_ops->free_unlinked_table(childp, ctx->level); in stage2_map_walk_table_pre()
973 struct stage2_map_data *data) in stage2_map_walk_leaf() argument
975 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in stage2_map_walk_leaf()
979 ret = stage2_map_walker_try_leaf(ctx, data); in stage2_map_walk_leaf()
980 if (ret != -E2BIG) in stage2_map_walk_leaf()
983 if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL)) in stage2_map_walk_leaf()
984 return -EINVAL; in stage2_map_walk_leaf()
986 if (!data->memcache) in stage2_map_walk_leaf()
987 return -ENOMEM; in stage2_map_walk_leaf()
989 childp = mm_ops->zalloc_page(data->memcache); in stage2_map_walk_leaf()
991 return -ENOMEM; in stage2_map_walk_leaf()
993 if (!stage2_try_break_pte(ctx, data->mmu)) { in stage2_map_walk_leaf()
994 mm_ops->put_page(childp); in stage2_map_walk_leaf()
995 return -EAGAIN; in stage2_map_walk_leaf()
1021 struct stage2_map_data *data = ctx->arg; in stage2_map_walker() local
1025 return stage2_map_walk_table_pre(ctx, data); in stage2_map_walker()
1027 return stage2_map_walk_leaf(ctx, data); in stage2_map_walker()
1029 return -EINVAL; in stage2_map_walker()
1033 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, in kvm_pgtable_stage2_map() argument
1040 .mmu = pgt->mmu, in kvm_pgtable_stage2_map()
1042 .force_pte = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot), in kvm_pgtable_stage2_map()
1052 if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys))) in kvm_pgtable_stage2_map()
1053 return -EINVAL; in kvm_pgtable_stage2_map()
1059 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_map()
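The stage-2 fault path is the main consumer of kvm_pgtable_stage2_map(): it tops up a memcache in advance (so the walker never allocates in atomic context) and passes the shared/fault walk flags so the mapping can race safely with other vCPUs. A hedged usage sketch, assuming the current prototype that takes a memcache pointer and walk flags; example_map_fault_page() is hypothetical:

/* Hypothetical fault-path helper: install one 4K stage-2 page mapping. */
static int example_map_fault_page(struct kvm_pgtable *pgt, u64 ipa, u64 pa,
				  void *memcache)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;

	return kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE, pa, prot, memcache,
				      KVM_PGTABLE_WALK_HANDLE_FAULT |
				      KVM_PGTABLE_WALK_SHARED);
}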
1064 int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size, in kvm_pgtable_stage2_set_owner() argument
1069 .mmu = pgt->mmu, in kvm_pgtable_stage2_set_owner()
1083 return -EINVAL; in kvm_pgtable_stage2_set_owner()
1085 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_set_owner()
1092 struct kvm_pgtable *pgt = ctx->arg; in stage2_unmap_walker()
1093 struct kvm_s2_mmu *mmu = pgt->mmu; in stage2_unmap_walker()
1094 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in stage2_unmap_walker()
1098 if (!kvm_pte_valid(ctx->old)) { in stage2_unmap_walker()
1099 if (stage2_pte_is_counted(ctx->old)) { in stage2_unmap_walker()
1100 kvm_clear_pte(ctx->ptep); in stage2_unmap_walker()
1101 mm_ops->put_page(ctx->ptep); in stage2_unmap_walker()
1106 if (kvm_pte_table(ctx->old, ctx->level)) { in stage2_unmap_walker()
1107 childp = kvm_pte_follow(ctx->old, mm_ops); in stage2_unmap_walker()
1109 if (mm_ops->page_count(childp) != 1) in stage2_unmap_walker()
1111 } else if (stage2_pte_cacheable(pgt, ctx->old)) { in stage2_unmap_walker()
1122 if (need_flush && mm_ops->dcache_clean_inval_poc) in stage2_unmap_walker()
1123 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops), in stage2_unmap_walker()
1124 kvm_granule_size(ctx->level)); in stage2_unmap_walker()
1127 mm_ops->put_page(childp); in stage2_unmap_walker()
1132 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) in kvm_pgtable_stage2_unmap() argument
1141 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_unmap()
1144 kvm_tlb_flush_vmid_range(pgt->mmu, addr, size); in kvm_pgtable_stage2_unmap()
1159 kvm_pte_t pte = ctx->old; in stage2_attr_walker()
1160 struct stage2_attr_data *data = ctx->arg; in stage2_attr_walker() local
1161 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in stage2_attr_walker()
1163 if (!kvm_pte_valid(ctx->old)) in stage2_attr_walker()
1164 return -EAGAIN; in stage2_attr_walker()
1166 data->level = ctx->level; in stage2_attr_walker()
1167 data->pte = pte; in stage2_attr_walker()
1168 pte &= ~data->attr_clr; in stage2_attr_walker()
1169 pte |= data->attr_set; in stage2_attr_walker()
1173 * but worst-case the access flag update gets lost and will be in stage2_attr_walker()
1176 if (data->pte != pte) { in stage2_attr_walker()
1179 * stage-2 PTE if we are going to add executable permission. in stage2_attr_walker()
1181 if (mm_ops->icache_inval_pou && in stage2_attr_walker()
1182 stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old)) in stage2_attr_walker()
1183 mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops), in stage2_attr_walker()
1184 kvm_granule_size(ctx->level)); in stage2_attr_walker()
1187 return -EAGAIN; in stage2_attr_walker()
1193 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr, in stage2_update_leaf_attrs() argument
1200 struct stage2_attr_data data = { in stage2_update_leaf_attrs() local
1206 .arg = &data, in stage2_update_leaf_attrs()
1210 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in stage2_update_leaf_attrs()
1215 *orig_pte = data.pte; in stage2_update_leaf_attrs()
1218 *level = data.level; in stage2_update_leaf_attrs()
1222 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) in kvm_pgtable_stage2_wrprotect() argument
1224 return stage2_update_leaf_attrs(pgt, addr, size, 0, in kvm_pgtable_stage2_wrprotect()
1229 void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr, in kvm_pgtable_stage2_mkyoung() argument
1234 ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0, in kvm_pgtable_stage2_mkyoung()
1248 kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF; in stage2_age_walker()
1249 struct stage2_age_data *data = ctx->arg; in stage2_age_walker() local
1251 if (!kvm_pte_valid(ctx->old) || new == ctx->old) in stage2_age_walker()
1254 data->young = true; in stage2_age_walker()
1259 * follows the race detection pattern of the other stage-2 walkers in in stage2_age_walker()
1262 if (data->mkold && !stage2_try_set_pte(ctx, new)) in stage2_age_walker()
1263 return -EAGAIN; in stage2_age_walker()
1269 * See the '->clear_flush_young()' callback on the KVM mmu notifier. in stage2_age_walker()
1274 bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, in kvm_pgtable_stage2_test_clear_young() argument
1277 struct stage2_age_data data = { in kvm_pgtable_stage2_test_clear_young() local
1282 .arg = &data, in kvm_pgtable_stage2_test_clear_young()
1286 WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker)); in kvm_pgtable_stage2_test_clear_young()
1287 return data.young; in kvm_pgtable_stage2_test_clear_young()
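This is the backend for the MMU-notifier ageing callbacks: with mkold false it only tests the access flag, with mkold true it also clears it, leaving the TLB invalidation to the core notifier code as the comment above notes. A hedged sketch of a caller, assuming the current prototype; example_age_page() is hypothetical:

/* Hypothetical ageing query over one page of IPA space. */
static bool example_age_page(struct kvm_pgtable *pgt, u64 ipa, bool mkold)
{
	return kvm_pgtable_stage2_test_clear_young(pgt, ipa, PAGE_SIZE, mkold);
}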
1290 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, in kvm_pgtable_stage2_relax_perms() argument
1298 return -EINVAL; in kvm_pgtable_stage2_relax_perms()
1309 ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level, flags); in kvm_pgtable_stage2_relax_perms()
1310 if (!ret || ret == -EAGAIN) in kvm_pgtable_stage2_relax_perms()
1311 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level); in kvm_pgtable_stage2_relax_perms()
1318 struct kvm_pgtable *pgt = ctx->arg; in stage2_flush_walker()
1319 struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; in stage2_flush_walker()
1321 if (!stage2_pte_cacheable(pgt, ctx->old)) in stage2_flush_walker()
1324 if (mm_ops->dcache_clean_inval_poc) in stage2_flush_walker()
1325 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops), in stage2_flush_walker()
1326 kvm_granule_size(ctx->level)); in stage2_flush_walker()
1330 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) in kvm_pgtable_stage2_flush() argument
1341 return kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_flush()
1351 .mmu = pgt->mmu, in kvm_pgtable_stage2_create_unlinked()
1363 * The input address (.addr) is irrelevant for walking an in kvm_pgtable_stage2_create_unlinked()
1367 struct kvm_pgtable_walk_data data = { in kvm_pgtable_stage2_create_unlinked() local
1369 .addr = 0, in kvm_pgtable_stage2_create_unlinked()
1372 struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; in kvm_pgtable_stage2_create_unlinked()
1377 return ERR_PTR(-EINVAL); in kvm_pgtable_stage2_create_unlinked()
1383 pgtable = mm_ops->zalloc_page(mc); in kvm_pgtable_stage2_create_unlinked()
1385 return ERR_PTR(-ENOMEM); in kvm_pgtable_stage2_create_unlinked()
1387 ret = __kvm_pgtable_walk(&data, mm_ops, (kvm_pteref_t)pgtable, in kvm_pgtable_stage2_create_unlinked()
1398 * Get the number of page-tables needed to replace a block with a
1414 return -EINVAL; in stage2_block_get_nr_page_tables()
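Eager splitting must reserve every table it could need before attempting the break-before-make, which is why the split walker below compares mc->nobjs against this count. For a 4K granule the arithmetic is simple: a level-2 (2M) block needs one level-3 table, while a level-1 (1G) block needs one level-2 table plus 512 level-3 tables. A standalone sketch of that count (not the kernel helper itself):

#include <stdio.h>

#define PTRS_PER_TABLE	512	/* 4K granule: entries per table level */

/* Tables required to replace a block at 'level' with 4K leaf mappings. */
static int nr_tables_for_split(int level)
{
	switch (level) {
	case 1:  return PTRS_PER_TABLE + 1;	/* one level-2 + 512 level-3 tables */
	case 2:  return 1;			/* a single level-3 table */
	case 3:  return 0;			/* already mapped at page granularity */
	default: return -1;			/* no block mappings at other levels */
	}
}

int main(void)
{
	printf("1G block -> %d tables, 2M block -> %d tables\n",
	       nr_tables_for_split(1), nr_tables_for_split(2));
	return 0;
}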
1421 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in stage2_split_walker()
1422 struct kvm_mmu_memory_cache *mc = ctx->arg; in stage2_split_walker()
1424 kvm_pte_t pte = ctx->old, new, *childp; in stage2_split_walker()
1426 s8 level = ctx->level; in stage2_split_walker()
1431 /* No huge-pages exist at the last level */ in stage2_split_walker()
1443 if (mc->nobjs >= nr_pages) { in stage2_split_walker()
1459 if (mc->nobjs < nr_pages) in stage2_split_walker()
1460 return -ENOMEM; in stage2_split_walker()
1466 childp = kvm_pgtable_stage2_create_unlinked(mmu->pgt, phys, in stage2_split_walker()
1473 return -EAGAIN; in stage2_split_walker()
1486 int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, in kvm_pgtable_stage2_split() argument
1496 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_split()
1507 u64 vtcr = mmu->vtcr; in __kvm_pgtable_stage2_init()
1510 s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; in __kvm_pgtable_stage2_init()
1513 pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz); in __kvm_pgtable_stage2_init()
1514 if (!pgt->pgd) in __kvm_pgtable_stage2_init()
1515 return -ENOMEM; in __kvm_pgtable_stage2_init()
1517 pgt->ia_bits = ia_bits; in __kvm_pgtable_stage2_init()
1518 pgt->start_level = start_level; in __kvm_pgtable_stage2_init()
1519 pgt->mm_ops = mm_ops; in __kvm_pgtable_stage2_init()
1520 pgt->mmu = mmu; in __kvm_pgtable_stage2_init()
1521 pgt->flags = flags; in __kvm_pgtable_stage2_init()
1522 pgt->force_pte_cb = force_pte_cb; in __kvm_pgtable_stage2_init()
1533 s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; in kvm_pgtable_stage2_pgd_size()
1541 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; in stage2_free_walker()
1543 if (!stage2_pte_is_counted(ctx->old)) in stage2_free_walker()
1546 mm_ops->put_page(ctx->ptep); in stage2_free_walker()
1548 if (kvm_pte_table(ctx->old, ctx->level)) in stage2_free_walker()
1549 mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops)); in stage2_free_walker()
1563 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); in kvm_pgtable_stage2_destroy()
1564 pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE; in kvm_pgtable_stage2_destroy()
1565 pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz); in kvm_pgtable_stage2_destroy()
1566 pgt->pgd = NULL; in kvm_pgtable_stage2_destroy()
1577 struct kvm_pgtable_walk_data data = { in kvm_pgtable_stage2_free_unlinked() local
1585 .addr = 0, in kvm_pgtable_stage2_free_unlinked()
1589 WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1)); in kvm_pgtable_stage2_free_unlinked()
1591 WARN_ON(mm_ops->page_count(pgtable) != 1); in kvm_pgtable_stage2_free_unlinked()
1592 mm_ops->put_page(pgtable); in kvm_pgtable_stage2_free_unlinked()