1 // SPDX-License-Identifier: GPL-2.0-only
33 hyp_spin_lock(&vm->lock); in guest_lock_component()
40 hyp_spin_unlock(&vm->lock); in guest_unlock_component()
133 /* The host stage 2 is id-mapped, so use parange for T0SZ */ in prepare_host_vtcr()
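The comment above is the whole trick: because the host's stage 2 is an identity map, its input address size equals the physical address size the CPU reports, so T0SZ can be derived straight from ID_AA64MMFR0_EL1.PARange. A minimal sketch of that derivation, using the architectural PARange encoding and an illustrative helper name (not the kernel's):

/* PARange 0..6 encodes 32..52 bits of PA space; for an id-mapped
 * stage 2, IPA size == PA size, so VTCR_EL2.T0SZ = 64 - PA bits. */
static const unsigned int pa_bits[] = { 32, 36, 40, 42, 44, 48, 52 };

static inline unsigned int t0sz_for_parange(unsigned int parange)
{
        return 64 - pa_bits[parange];
}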
150 mmu->arch = &host_mmu.arch; in kvm_host_prepare_stage2()
162 mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd); in kvm_host_prepare_stage2()
163 mmu->pgt = &host_mmu.pgt; in kvm_host_prepare_stage2()
164 atomic64_set(&mmu->vmid.id, 0); in kvm_host_prepare_stage2()
171 void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size)); in guest_s2_zalloc_pages_exact()
185 hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE)); in guest_s2_free_pages_exact()
193 addr = hyp_alloc_pages(&current_vm->pool, 0); in guest_s2_zalloc_page()
203 p->refcount = 1; in guest_s2_zalloc_page()
204 p->order = 0; in guest_s2_zalloc_page()
211 hyp_get_page(&current_vm->pool, addr); in guest_s2_get_page()
216 hyp_put_page(&current_vm->pool, addr); in guest_s2_put_page()
222 size += va - PTR_ALIGN_DOWN(va, PAGE_SIZE); in __apply_guest_page()
242 size -= map_size; in __apply_guest_page()
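The two fragments above come from a chunked walk: the range is first widened to page boundaries, then a callback is applied one mapping-sized chunk at a time. A self-contained sketch of that pattern, with an illustrative signature:

/* Widen [va, va + size) to whole pages, then apply fn() per chunk. */
static int apply_per_page(void *va, size_t size,
                          int (*fn)(void *addr, size_t len))
{
        size_t map_size = PAGE_SIZE;
        int ret;

        size += va - PTR_ALIGN_DOWN(va, PAGE_SIZE);
        va = PTR_ALIGN_DOWN(va, PAGE_SIZE);
        size = ALIGN(size, PAGE_SIZE);

        while (size) {
                ret = fn(va, map_size);
                if (ret)
                        return ret;

                va += map_size;
                size -= map_size;
        }

        return 0;
}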
259 struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu; in kvm_guest_prepare_stage2()
263 nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT; in kvm_guest_prepare_stage2()
264 ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0); in kvm_guest_prepare_stage2()
268 hyp_spin_lock_init(&vm->lock); in kvm_guest_prepare_stage2()
269 vm->mm_ops = (struct kvm_pgtable_mm_ops) { in kvm_guest_prepare_stage2()
283 ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL); in kvm_guest_prepare_stage2()
288 vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd); in kvm_guest_prepare_stage2()
300 kvm_pgtable_stage2_destroy(&vm->pgt); in reclaim_pgtable_pages()
301 vm->kvm.arch.mmu.pgd_phys = 0ULL; in reclaim_pgtable_pages()
305 addr = hyp_alloc_pages(&vm->pool, 0); in reclaim_pgtable_pages()
308 page->refcount = 0; in reclaim_pgtable_pages()
309 page->order = 0; in reclaim_pgtable_pages()
312 addr = hyp_alloc_pages(&vm->pool, 0); in reclaim_pgtable_pages()
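The repeated hyp_alloc_pages() calls above drain the per-VM pool once the page table has been destroyed: every page is popped, its allocator metadata reset (the refcount/order stores visible above), and the page handed back to the host. A sketch under the assumption that pages are returned through a push_hyp_memcache()-style memcache:

static void drain_vm_pool(struct pkvm_hyp_vm *vm,
                          struct kvm_hyp_memcache *mc)
{
        void *addr = hyp_alloc_pages(&vm->pool, 0);

        while (addr) {
                struct hyp_page *page = hyp_virt_to_page(addr);

                /* Reset allocator metadata, as in the fragments above. */
                page->refcount = 0;
                page->order = 0;
                push_hyp_memcache(mc, addr, hyp_virt_to_phys);
                addr = hyp_alloc_pages(&vm->pool, 0);
        }
}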
321 if (params->hcr_el2 & HCR_VM) in __pkvm_prot_finalize()
322 return -EPERM; in __pkvm_prot_finalize()
324 params->vttbr = kvm_get_vttbr(mmu); in __pkvm_prot_finalize()
325 params->vtcr = mmu->vtcr; in __pkvm_prot_finalize()
326 params->hcr_el2 |= HCR_VM; in __pkvm_prot_finalize()
331 * page-table walks that have started before we trapped to EL2 in __pkvm_prot_finalize()
336 write_sysreg_hcr(params->hcr_el2); in __pkvm_prot_finalize()
356 struct memblock_region *reg; in host_stage2_unmap_dev_all() local
360 /* Unmap all non-memory regions to recycle the pages */ in host_stage2_unmap_dev_all()
361 for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) { in host_stage2_unmap_dev_all()
362 reg = &hyp_memory[i]; in host_stage2_unmap_dev_all()
363 ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr); in host_stage2_unmap_dev_all()
367 return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr); in host_stage2_unmap_dev_all()
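The loop above punches out everything that is not memory: hyp_memory is sorted by base address, so the gap in front of each region is [addr, reg->base), and a final unmap covers the tail up to the end of the input address space. A simplified, self-contained version of that logic (types and the unmap callback are stand-ins):

struct region { u64 base, size; };

static int unmap_holes(struct region *regs, int nr, u64 ia_end,
                       int (*unmap)(u64 addr, u64 size))
{
        u64 addr = 0;
        int i, ret;

        for (i = 0; i < nr; i++) {
                /* Unmap the gap below this region, then skip over it. */
                ret = unmap(addr, regs[i].base - addr);
                if (ret)
                        return ret;
                addr = regs[i].base + regs[i].size;
        }

        /* Tail gap: from the last region to the end of the IA space. */
        return unmap(addr, ia_end - addr);
}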
378 struct memblock_region *reg; in find_mem_range() local
381 range->start = 0; in find_mem_range()
382 range->end = ULONG_MAX; in find_mem_range()
387 reg = &hyp_memory[cur]; in find_mem_range()
388 end = reg->base + reg->size; in find_mem_range()
389 if (addr < reg->base) { in find_mem_range()
391 range->end = reg->base; in find_mem_range()
394 range->start = end; in find_mem_range()
396 range->start = reg->base; in find_mem_range()
397 range->end = end; in find_mem_range()
398 return reg; in find_mem_range()
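The fragments above outline a binary search over the sorted region array: a hit returns the region and sets the range to its bounds; a miss narrows the range to the surrounding gap, which is what lets callers reason about holes as well as regions. Reusing the simplified struct region from the previous sketch:

static struct region *find_range(struct region *regs, int nr, u64 addr,
                                 u64 *start, u64 *end)
{
        int lo = 0, hi = nr;

        *start = 0;
        *end = ULONG_MAX;

        while (lo < hi) {
                int cur = (lo + hi) / 2;
                u64 rend = regs[cur].base + regs[cur].size;

                if (addr < regs[cur].base) {
                        hi = cur;               /* gap below: shrink the end */
                        *end = regs[cur].base;
                } else if (addr >= rend) {
                        lo = cur + 1;           /* gap above: raise the start */
                        *start = rend;
                } else {
                        *start = regs[cur].base;
                        *end = rend;
                        return &regs[cur];
                }
        }

        return NULL;
}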
414 return range->start <= addr && addr < range->end; in is_in_mem_range()
419 struct memblock_region *reg; in check_range_allowed_memory() local
426 reg = find_mem_range(start, &range); in check_range_allowed_memory()
427 if (!is_in_mem_range(end - 1, &range)) in check_range_allowed_memory()
428 return -EINVAL; in check_range_allowed_memory()
430 if (!reg || reg->flags & MEMBLOCK_NOMAP) in check_range_allowed_memory()
431 return -EPERM; in check_range_allowed_memory()
443 return is_in_mem_range(end - 1, &r); in range_is_memory()
449 return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start, in __host_stage2_idmap()
464 if (__ret == -ENOMEM) { \
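The -ENOMEM branch above belongs to an error-recovery wrapper: when a host stage-2 operation runs out of page-table pages, non-memory (device) mappings are torn down to recycle pages and the operation is retried once. A plausible shape for that macro; the exact body is an assumption:

#define host_stage2_try(fn, ...)                                        \
        ({                                                              \
                int __ret = fn(__VA_ARGS__);                            \
                if (__ret == -ENOMEM) {                                 \
                        /* Recycle pages from device mappings, retry. */\
                        __ret = host_stage2_unmap_dev_all();            \
                        if (!__ret)                                     \
                                __ret = fn(__VA_ARGS__);                \
                }                                                       \
                __ret;                                                  \
        })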
475 return parent->start <= child->start && child->end <= parent->end; in range_included()
492 return -EAGAIN; in host_stage2_adjust_range()
497 return -EPERM; in host_stage2_adjust_range()
514 return -EINVAL; in host_stage2_adjust_range()
534 return -EPERM; in host_stage2_set_owner_locked()
553 * Block mappings must be used with care in the host stage-2 as a in host_stage2_force_pte_cb()
557 * That assumption is correct for the host stage-2 with RWX mappings in host_stage2_force_pte_cb()
561 * the host stage-2 page-table is in fact the only place where this in host_stage2_force_pte_cb()
562 * state is stored. In all those cases, it is safer to use page-level in host_stage2_force_pte_cb()
563 * mappings, hence avoiding to lose the state because of side-effects in in host_stage2_force_pte_cb()
586 ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot); in host_stage2_idmap()
602 * We've presumably raced with a page-table change which caused in handle_host_mem_abort()
610 * Yikes, we couldn't resolve the fault IPA. This should reinject an in handle_host_mem_abort()
617 BUG_ON(ret && ret != -EAGAIN); in handle_host_mem_abort()
628 struct check_walk_data *d = ctx->arg; in __check_page_state_visitor()
630 return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM; in __check_page_state_visitor()
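The visitor above compares the page state inferred from each leaf PTE against a desired value and fails the walk with -EPERM on the first mismatch. A sketch of how such a visitor is typically driven, assuming the standard kvm_pgtable_walk() leaf-walk API:

static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr,
                                  u64 size, struct check_walk_data *data)
{
        struct kvm_pgtable_walker walker = {
                .cb     = __check_page_state_visitor,
                .arg    = data,
                .flags  = KVM_PGTABLE_WALK_LEAF,
        };

        return kvm_pgtable_walk(pgt, addr, size, &walker);
}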
658 return -EPERM; in __host_check_page_state_range()
689 return -EPERM; in __hyp_check_page_state_range()
711 hyp_assert_lock_held(&vm->lock); in __guest_check_page_state_range()
712 return check_page_state_range(&vm->pgt, addr, size, &d); in __guest_check_page_state_range()
758 ret = -EBUSY; in __pkvm_host_unshare_hyp()
833 u64 size = end - start; in hyp_pin_shared_mem()
851 if (p->refcount == 1) in hyp_pin_shared_mem()
875 if (p->refcount == 1) in hyp_unpin_shared_mem()
914 static int __guest_check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size) in __guest_check_transition_size() argument
924 block_size = kvm_granule_size(KVM_PGTABLE_LAST_LEVEL - 1); in __guest_check_transition_size()
927 return -EINVAL; in __guest_check_transition_size()
929 if (!IS_ALIGNED(phys | ipa, block_size)) in __guest_check_transition_size()
930 return -EINVAL; in __guest_check_transition_size()
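Piecing the fragments above together: the only multi-page transition accepted is a single block at the next-to-last page-table level, aligned in both PA and IPA; everything else must be exactly one page. A plausible reconstruction of the full check:

static int check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size)
{
        u64 block_size;

        if (nr_pages == 1) {
                *size = PAGE_SIZE;
                return 0;
        }

        /* Only one block at the next-to-last level is supported. */
        block_size = kvm_granule_size(KVM_PGTABLE_LAST_LEVEL - 1);
        if (nr_pages != block_size >> PAGE_SHIFT)
                return -EINVAL;

        if (!IS_ALIGNED(phys | ipa, block_size))
                return -EINVAL;

        *size = block_size;
        return 0;
}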
941 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_share_guest() local
946 return -EINVAL; in __pkvm_host_share_guest()
948 ret = __guest_check_transition_size(phys, ipa, nr_pages, &size); in __pkvm_host_share_guest()
959 ret = __guest_check_page_state_range(vm, ipa, size, PKVM_NOPAGE); in __pkvm_host_share_guest()
968 if (page->host_share_guest_count == U32_MAX) { in __pkvm_host_share_guest()
969 ret = -EBUSY; in __pkvm_host_share_guest()
973 /* Only host to np-guest multi-sharing is tolerated */ in __pkvm_host_share_guest()
974 if (page->host_share_guest_count) in __pkvm_host_share_guest()
979 ret = -EPERM; in __pkvm_host_share_guest()
986 page->host_share_guest_count++; in __pkvm_host_share_guest()
989 WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, size, phys, in __pkvm_host_share_guest()
991 &vcpu->vcpu.arch.pkvm_memcache, 0)); in __pkvm_host_share_guest()
1000 static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa, u64 size) in __check_host_shared_guest() argument
1008 ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level); in __check_host_shared_guest()
1012 return -ENOENT; in __check_host_shared_guest()
1014 return -E2BIG; in __check_host_shared_guest()
1016 state = guest_get_page_state(pte, ipa); in __check_host_shared_guest()
1018 return -EPERM; in __check_host_shared_guest()
1027 return -EPERM; in __check_host_shared_guest()
1028 if (WARN_ON(!page->host_share_guest_count)) in __check_host_shared_guest()
1029 return -EINVAL; in __check_host_shared_guest()
1039 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_unshare_guest() local
1043 ret = __guest_check_transition_size(0, ipa, nr_pages, &size); in __pkvm_host_unshare_guest()
1050 ret = __check_host_shared_guest(vm, &phys, ipa, size); in __pkvm_host_unshare_guest()
1054 ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, size); in __pkvm_host_unshare_guest()
1060 page->host_share_guest_count--; in __pkvm_host_unshare_guest()
1061 if (!page->host_share_guest_count) in __pkvm_host_unshare_guest()
1072 static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa, u64 size) in assert_host_shared_guest() argument
1083 ret = __check_host_shared_guest(vm, &phys, ipa, size); in assert_host_shared_guest()
1088 WARN_ON(ret && ret != -ENOENT); in assert_host_shared_guest()
1094 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_relax_perms_guest() local
1098 return -EPERM; in __pkvm_host_relax_perms_guest()
1101 return -EINVAL; in __pkvm_host_relax_perms_guest()
1103 assert_host_shared_guest(vm, ipa, PAGE_SIZE); in __pkvm_host_relax_perms_guest()
1105 ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0); in __pkvm_host_relax_perms_guest()
1113 u64 size, ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_wrprotect_guest() local
1117 return -EPERM; in __pkvm_host_wrprotect_guest()
1119 ret = __guest_check_transition_size(0, ipa, nr_pages, &size); in __pkvm_host_wrprotect_guest()
1123 assert_host_shared_guest(vm, ipa, size); in __pkvm_host_wrprotect_guest()
1125 ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, size); in __pkvm_host_wrprotect_guest()
1133 u64 size, ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_test_clear_young_guest() local
1137 return -EPERM; in __pkvm_host_test_clear_young_guest()
1139 ret = __guest_check_transition_size(0, ipa, nr_pages, &size); in __pkvm_host_test_clear_young_guest()
1143 assert_host_shared_guest(vm, ipa, size); in __pkvm_host_test_clear_young_guest()
1145 ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, size, mkold); in __pkvm_host_test_clear_young_guest()
1154 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_mkyoung_guest() local
1157 return -EPERM; in __pkvm_host_mkyoung_guest()
1159 assert_host_shared_guest(vm, ipa, PAGE_SIZE); in __pkvm_host_mkyoung_guest()
1161 kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0); in __pkvm_host_mkyoung_guest()
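The relax-perms, wrprotect, test-clear-young and mkyoung helpers above all share one spine: validate the request, assert that the range really is shared from the host to this guest, then run the stage-2 operation under the guest lock. An illustrative (not in-tree) factoring of that common shape:

static int guest_s2_op(struct pkvm_hyp_vm *vm, u64 ipa, u64 size,
                       int (*op)(struct kvm_pgtable *pgt, u64 addr, u64 size))
{
        int ret;

        /* Invariant check: warns unless the range is host-shared. */
        assert_host_shared_guest(vm, ipa, size);

        guest_lock_component(vm);
        ret = op(&vm->pgt, ipa, size);
        guest_unlock_component(vm);

        return ret;
}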
1215 return BIT(selftest_vm.pgt.ia_bits - 1); in selftest_ipa()
1221 u64 size = PAGE_SIZE << selftest_page->order; in assert_page_state()
1224 u64 ipa[2] = { selftest_ipa(), selftest_ipa() + PAGE_SIZE }; in assert_page_state() local
1238 WARN_ON(__guest_check_page_state_range(vm, ipa[0], size, selftest_state.guest[0])); in assert_page_state()
1239 WARN_ON(__guest_check_page_state_range(vm, ipa[1], size, selftest_state.guest[1])); in assert_page_state()
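assert_page_state() above re-derives the expected host, hyp and guest views of the test page after every step. The selftest below drives each ownership transition through assert_transition_res(); a plausible definition, assuming the macro re-checks the page state after each call:

#define assert_transition_res(res, fn, ...)                             \
        do {                                                            \
                WARN_ON(fn(__VA_ARGS__) != res);                        \
                assert_page_state();                                    \
        } while (0)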
1259 selftest_page->refcount = 0; in pkvm_ownership_selftest()
1262 size = PAGE_SIZE << selftest_page->order; in pkvm_ownership_selftest()
1271 assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1); in pkvm_ownership_selftest()
1272 assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn); in pkvm_ownership_selftest()
1273 assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn); in pkvm_ownership_selftest()
1274 assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1); in pkvm_ownership_selftest()
1275 assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1); in pkvm_ownership_selftest()
1276 assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size); in pkvm_ownership_selftest()
1277 assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); in pkvm_ownership_selftest()
1278 assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm); in pkvm_ownership_selftest()
1283 assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1); in pkvm_ownership_selftest()
1284 assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn); in pkvm_ownership_selftest()
1285 assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1); in pkvm_ownership_selftest()
1286 assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm); in pkvm_ownership_selftest()
1287 assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size); in pkvm_ownership_selftest()
1292 assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn); in pkvm_ownership_selftest()
1293 assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1); in pkvm_ownership_selftest()
1294 assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1); in pkvm_ownership_selftest()
1295 assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1); in pkvm_ownership_selftest()
1296 assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); in pkvm_ownership_selftest()
1297 assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm); in pkvm_ownership_selftest()
1303 assert_transition_res(-EBUSY, __pkvm_host_unshare_hyp, pfn); in pkvm_ownership_selftest()
1304 assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn); in pkvm_ownership_selftest()
1305 assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1); in pkvm_ownership_selftest()
1306 assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1); in pkvm_ownership_selftest()
1307 assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1); in pkvm_ownership_selftest()
1308 assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); in pkvm_ownership_selftest()
1309 assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm); in pkvm_ownership_selftest()
1322 assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1); in pkvm_ownership_selftest()
1323 assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1); in pkvm_ownership_selftest()
1324 assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn); in pkvm_ownership_selftest()
1325 assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn); in pkvm_ownership_selftest()
1326 assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1); in pkvm_ownership_selftest()
1327 assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); in pkvm_ownership_selftest()
1328 assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm); in pkvm_ownership_selftest()
1329 assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size); in pkvm_ownership_selftest()
1334 assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1); in pkvm_ownership_selftest()
1339 assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot); in pkvm_ownership_selftest()
1340 assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1); in pkvm_ownership_selftest()
1341 assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1); in pkvm_ownership_selftest()
1342 assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn); in pkvm_ownership_selftest()
1343 assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn); in pkvm_ownership_selftest()
1344 assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1); in pkvm_ownership_selftest()
1345 assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size); in pkvm_ownership_selftest()
1349 WARN_ON(hyp_virt_to_page(virt)->host_share_guest_count != 2); in pkvm_ownership_selftest()
1362 selftest_page->refcount = 1; in pkvm_ownership_selftest()