1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2012,2013 - ARM Ltd
29 * and that half of that space (VA_BITS - 1) is used for the linear
30 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
33 * top or the bottom half of that space to shadow the kernel's linear
37 * If the page is in the bottom half, we have to use the top half. If
38 * the page is in the top half, we have to use the bottom half:
41 * if (T & BIT(VA_BITS - 1))
42 * HYP_VA_MIN = 0 //idmap in upper half
44 * HYP_VA_MIN = 1 << (VA_BITS - 1)
45 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
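The two HYP_VA_MIN assignments above are the arms of an if/else; the intervening line (the else branch) simply does not match this query. T is the physical address of the idmap trampoline page, so the EL2 range ends up in whichever half the idmap does not occupy. A small standalone sketch of that selection, assuming VA_BITS is 48 and using a made-up idmap address (the real decision happens in the kernel's EL2 setup at boot):

#include <stdint.h>
#include <stdio.h>

#define VA_BITS 48ULL               /* assumed kernel VA width, for illustration */
#define BIT(n)  (1ULL << (n))

int main(void)
{
	/* Hypothetical PA of the idmap trampoline page (not a real address). */
	uint64_t T = 0x0000800012340000ULL;
	uint64_t hyp_va_min, hyp_va_max;

	if (T & BIT(VA_BITS - 1))
		hyp_va_min = 0;                         /* idmap in upper half */
	else
		hyp_va_min = 1ULL << (VA_BITS - 1);     /* idmap in lower half */
	hyp_va_max = hyp_va_min + (1ULL << (VA_BITS - 1)) - 1;

	printf("HYP VA range: [0x%llx, 0x%llx]\n",
	       (unsigned long long)hyp_va_min,
	       (unsigned long long)hyp_va_max);
	return 0;
}

With the example address, bit 47 of T is set, so the idmap sits in the upper half and EL2 gets the bottom half, [0, 2^47 - 1].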
77 /* Convert hyp VA -> PA. */
88 /* Convert PA -> kimg VA. */
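These two comments are consecutive steps of the same conversion: hyp VA to PA, then PA to kernel-image VA. A userspace sketch of the arithmetic, assuming the offsets work the way the surrounding file's hyp_physvirt_offset and kimage_voffset variables suggest; the values below are purely illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values only; the kernel computes these at boot. */
	uint64_t hyp_physvirt_offset = 0x0000004080000000ULL; /* PA - hyp VA  */
	uint64_t kimage_voffset      = 0xffff800010000000ULL; /* kimg VA - PA */
	uint64_t hyp_va = 0x0000000012345000ULL;

	uint64_t pa      = hyp_va + hyp_physvirt_offset;      /* hyp VA -> PA  */
	uint64_t kimg_va = pa + kimage_voffset;                /* PA -> kimg VA */

	printf("hyp VA 0x%llx -> PA 0x%llx -> kimg VA 0x%llx\n",
	       (unsigned long long)hyp_va, (unsigned long long)pa,
	       (unsigned long long)kimg_va);
	return 0;
}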
113 * Can be called from hyp or non-hyp context.
124 * context. When called from a VHE non-hyp context, kvm_update_va_mask() will in __kern_hyp_va()
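For the kernel-to-hyp direction, the conversion amounts to masking off the top bits of the kernel address and inserting a hyp-specific tag; the exact mask and tag are only known at boot, which is why kvm_update_va_mask() patches the instruction sequence at runtime. A rough userspace model of that transformation, with an invented mask width and tag value (the real ones come from kvm_update_va_mask()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented parameters for illustration only. */
	uint64_t va_mask = (1ULL << 40) - 1;       /* keep the low 40 bits        */
	uint64_t tag_val = 0x5aULL << 40;          /* bits inserted above the mask */
	uint64_t kern_va = 0xffff000043210000ULL;

	uint64_t hyp_va = (kern_va & va_mask) | tag_val;

	printf("kern VA 0x%llx -> hyp VA 0x%llx\n",
	       (unsigned long long)kern_va, (unsigned long long)hyp_va);
	return 0;
}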
143 * We currently support using a VM-specified IPA size. For backward
148 #define kvm_phys_shift(mmu) VTCR_EL2_IPA((mmu)->vtcr)
150 #define kvm_phys_mask(mmu) (kvm_phys_size(mmu) - _AC(1, ULL))
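kvm_phys_size() is not among the hits, but given the mask definition on line 150 it is evidently 1ULL << kvm_phys_shift(mmu), so the three macros relate as size = 1 << shift and mask = size - 1. A quick illustration with an assumed 40-bit guest IPA space:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: assume the guest IPA space is 40 bits wide. */
#define EXAMPLE_IPA_SHIFT 40ULL

int main(void)
{
	uint64_t size = 1ULL << EXAMPLE_IPA_SHIFT;   /* kvm_phys_size() */
	uint64_t mask = size - 1ULL;                 /* kvm_phys_mask() */

	printf("shift=%llu size=0x%llx mask=0x%llx\n",
	       (unsigned long long)EXAMPLE_IPA_SHIFT,
	       (unsigned long long)size,
	       (unsigned long long)mask);
	return 0;
}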
190 int idx = slot - (slot != HYP_VECTOR_DIRECT); in __kvm_vector_slot2addr()
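The subtraction maps the first two slots to index 0 and shifts the remaining slots down by one. Assuming the enum starts at 0 and follows the usual ordering (DIRECT, SPECTRE_DIRECT, INDIRECT, SPECTRE_INDIRECT), the mapping works out as in this small demo:

#include <stdio.h>

/* Assumed slot ordering, mirroring arm64_hyp_spectre_vector in the kernel. */
enum { HYP_VECTOR_DIRECT, HYP_VECTOR_SPECTRE_DIRECT,
       HYP_VECTOR_INDIRECT, HYP_VECTOR_SPECTRE_INDIRECT };

int main(void)
{
	for (int slot = HYP_VECTOR_DIRECT; slot <= HYP_VECTOR_SPECTRE_INDIRECT; slot++) {
		int idx = slot - (slot != HYP_VECTOR_DIRECT);
		printf("slot %d -> idx %d\n", slot, idx);
	}
	return 0;
}

This prints 0->0, 1->0, 2->1, 3->2.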
247 * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the in __invalidate_icache_guest_page()
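The comment is truncated by the match, but the shape of the helper is a two-way choice: invalidate the entire I-cache when it is VIPT-aliasing (or when the range is too large to do by line), otherwise invalidate only the lines covering the guest page. A kernel-context sketch under those assumptions, using the arm64 helpers icache_is_aliasing(), icache_inval_all_pou() and icache_inval_pou(); the 1 MiB cut-off is invented for the example:

/* Sketch only; assumes <asm/cacheflush.h> / <linux/sizes.h> context. */
static inline void invalidate_icache_guest_page_sketch(void *va, size_t size)
{
	if (icache_is_aliasing() || size > SZ_1M)        /* assumed cut-off      */
		icache_inval_all_pou();                  /* blow the whole cache */
	else
		icache_inval_pou((unsigned long)va,
				 (unsigned long)va + size);
}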
268 * We are not in the kvm->srcu critical section most of the time, so we take
275 int srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_read_guest_lock()
278 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_read_guest_lock()
286 int srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_write_guest_lock()
289 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_write_guest_lock()
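Both helpers follow the same pattern: enter the kvm->srcu read-side critical section, perform the guest access, then leave it and return the access's result. A sketch of how the lock/unlock hits above likely bracket the call, in kernel-context C (kvm_read_guest(), gpa_t and the struct layout come from <linux/kvm_host.h>):

/* Sketch of the wrapper shape suggested by the hits above. */
static inline int kvm_read_guest_lock_sketch(struct kvm *kvm, gpa_t gpa,
					     void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);      /* enter read-side CS */
	int ret = kvm_read_guest(kvm, gpa, data, len);  /* the actual access  */

	srcu_read_unlock(&kvm->srcu, srcu_idx);         /* leave read-side CS */
	return ret;
}

kvm_write_guest_lock() is the same wrapper around kvm_write_guest().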
303 struct kvm_vmid *vmid = &mmu->vmid; in kvm_get_vttbr()
307 baddr = mmu->pgd_phys; in kvm_get_vttbr()
308 vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT; in kvm_get_vttbr()
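VTTBR_EL2 is assembled from the stage-2 page-table base address, the VMID field, and (where supported) the CnP bit. A userspace sketch of that composition; the field positions (VMID at bit 48, CnP at bit 0) are stated here as assumptions for the example rather than taken from the listing:

#include <stdint.h>
#include <stdio.h>

/* Field positions assumed for illustration. */
#define EX_VTTBR_VMID_SHIFT 48
#define EX_VTTBR_CNP_BIT    (1ULL << 0)

int main(void)
{
	uint64_t pgd_phys = 0x0000000089aa0000ULL; /* stage-2 PGD base (made up) */
	uint64_t vmid     = 5;                     /* allocated VMID (made up)   */
	int has_cnp       = 1;                     /* system supports CnP        */

	uint64_t vttbr = pgd_phys |
			 (vmid << EX_VTTBR_VMID_SHIFT) |
			 (has_cnp ? EX_VTTBR_CNP_BIT : 0);

	printf("VTTBR_EL2 = 0x%llx\n", (unsigned long long)vttbr);
	return 0;
}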
320 write_sysreg(mmu->vtcr, vtcr_el2); in __load_stage2()
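The VTCR write above is presumably paired with loading VTTBR_EL2 from kvm_get_vttbr() before the guest's stage-2 translation regime is used. A minimal kernel-context sketch of that pairing (write_sysreg() is the arm64 accessor; the ordering is the point being illustrated):

/* Sketch: program the stage-2 control and base registers together. */
static inline void load_stage2_sketch(struct kvm_s2_mmu *mmu)
{
	write_sysreg(mmu->vtcr, vtcr_el2);              /* stage-2 translation control */
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);    /* stage-2 base + VMID (+CnP)  */
}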
333 return container_of(mmu->arch, struct kvm, arch); in kvm_s2_mmu_to_kvm()
344 return !(mmu->tlb_vttbr & VTTBR_CNP_BIT); in kvm_s2_mmu_valid()
353 return &kvm->arch.mmu != mmu; in kvm_is_nested_s2_mmu()
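The last three hits show how a stage-2 MMU is tied back to its VM: container_of() recovers the enclosing struct kvm from the kvm_arch back-pointer stored in the mmu, kvm_s2_mmu_valid() apparently uses the CnP bit of the cached VTTBR as a "not yet holding a real translation" marker, and any MMU other than the VM's canonical &kvm->arch.mmu is treated as a nested one. The container_of() step is plain pointer arithmetic; here is a self-contained userspace demo of the same pattern, with toy struct names rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for struct kvm / struct kvm_arch / struct kvm_s2_mmu. */
struct toy_arch { int ipa_bits; };
struct toy_vm   { int id; struct toy_arch arch; };
struct toy_mmu  { struct toy_arch *arch; };     /* back-pointer, like mmu->arch */

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct toy_vm vm = { .id = 7, .arch = { .ipa_bits = 40 } };
	struct toy_mmu mmu = { .arch = &vm.arch };

	/* Same trick as kvm_s2_mmu_to_kvm(): walk back from the embedded member. */
	struct toy_vm *owner = container_of(mmu.arch, struct toy_vm, arch);

	printf("recovered VM id = %d\n", owner->id);
	return 0;
}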