Lines Matching: linear, mapping, mode
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2012,2013 - ARM Ltd
20 * Instead, give the HYP mode its own VA region at a fixed offset from
29 * and that half of that space (VA_BITS - 1) is used for the linear
30 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
33 * top or the bottom half of that space to shadow the kernel's linear
34 * mapping?". As we need to idmap the trampoline page, this is
41 * if (T & BIT(VA_BITS - 1))
44 * HYP_VA_MIN = 1 << (VA_BITS - 1)
45 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
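The rule quoted above picks which half of the EL2 VA space shadows the kernel's linear mapping, based on where the idmap'd trampoline page sits. Written out as plain C it is roughly the following sketch; the branch taken when the bit is set is assumed to select HYP_VA_MIN = 0 (that line is not among the matches), and the definition of T is likewise not shown here:

	/* t: physical address of the idmap'd trampoline page (definition not in the matches). */
	static void pick_hyp_va_window(unsigned long t,
				       unsigned long *hyp_va_min, unsigned long *hyp_va_max)
	{
		if (t & BIT(VA_BITS - 1))
			*hyp_va_min = 0;			/* assumed: idmap sits in the upper half */
		else
			*hyp_va_min = 1UL << (VA_BITS - 1);

		*hyp_va_max = *hyp_va_min + (1UL << (VA_BITS - 1)) - 1;
	}

Either way HYP ends up with the half that does not contain the idmap page, so the idmap and the shadowed linear mapping cannot collide.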
77 /* Convert hyp VA -> PA. */
88 /* Convert PA -> kimg VA. */
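These two comments are the steps of the asm helper that turns a HYP VA back into a kernel-image VA. In C terms the arithmetic is roughly the following, assuming the usual arm64 offsets (hyp_physvirt_offset for HYP VA to PA, kimage_voffset for PA to kernel-image VA); the function name here is made up for illustration, the real helper is an assembly macro:

	static inline unsigned long hyp_to_kimg_va(unsigned long hyp_va)
	{
		unsigned long pa = hyp_va + hyp_physvirt_offset;	/* Convert hyp VA -> PA. */

		return pa + kimage_voffset;				/* Convert PA -> kimg VA. */
	}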
113 * Can be called from hyp or non-hyp context.
124 * context. When called from a VHE non-hyp context, kvm_update_va_mask() will in __kern_hyp_va()
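As the truncated comment indicates, __kern_hyp_va() only reserves an instruction slot and kvm_update_va_mask() fills in the real instructions at boot. The net effect is roughly masking off the top of the kernel VA and inserting a HYP tag, as modelled below; va_mask, tag_val and tag_lsb are illustrative names, and the real conversion is a boot-patched and/ror/add instruction sequence rather than C:

	/* Illustrative values; the real mask and tag are computed at boot. */
	static unsigned long va_mask, tag_val;
	static unsigned int tag_lsb;

	/* Simplified model of the kernel-VA -> HYP-VA conversion. */
	static inline unsigned long kern_to_hyp_va(unsigned long va)
	{
		return (va & va_mask) | (tag_val << tag_lsb);
	}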
145 * We currently support using a VM-specified IPA size. For backward
150 #define kvm_phys_shift(mmu) VTCR_EL2_IPA((mmu)->vtcr)
152 #define kvm_phys_mask(mmu) (kvm_phys_size(mmu) - _AC(1, ULL))
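kvm_phys_shift() reads the guest's IPA width back out of the VTCR_EL2 value cached in the MMU, and the mask is derived from the size. The size macro itself is not among the matched lines; presumably it sits between the two shown and is simply 1 shifted left by the IPA width:

	#define kvm_phys_shift(mmu)	VTCR_EL2_IPA((mmu)->vtcr)
	#define kvm_phys_size(mmu)	(_AC(1, ULL) << kvm_phys_shift(mmu))	/* assumed definition */
	#define kvm_phys_mask(mmu)	(kvm_phys_size(mmu) - _AC(1, ULL))

For example, a 40-bit IPA space gives a size of 1 TiB and a mask of 0xff_ffff_ffff.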
193 int idx = slot - (slot != HYP_VECTOR_DIRECT); in __kvm_vector_slot2addr()
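The slot - (slot != HYP_VECTOR_DIRECT) expression is a small branchless trick: the comparison contributes 0 for the direct slot and 1 for every other slot, so slots 1..3 land on consecutive 2K blocks starting at the base while the direct slot stays at offset 0. A sketch of the surrounding helper, assuming the conventional slot order (HYP_VECTOR_DIRECT, HYP_VECTOR_SPECTRE_DIRECT, HYP_VECTOR_INDIRECT, HYP_VECTOR_SPECTRE_INDIRECT):

	static void *vector_slot2addr(void *base, enum arm64_hyp_spectre_vector slot)
	{
		int idx = slot - (slot != HYP_VECTOR_DIRECT);	/* 0,1 -> 0; 2 -> 1; 3 -> 2 */

		return base + idx * SZ_2K;			/* each vector block is 2K */
	}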
250 * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the in __invalidate_icache_guest_page()
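The comment describes the usual arm64 fallback for guest I-cache maintenance: with an aliasing (VIPT) I-cache, invalidating by VA range is not reliable, so the whole I-cache is invalidated to the PoU instead. A simplified sketch using the arm64 cache helpers; the real function also takes the full-invalidate path when the range is large (the rest of that sentence is cut off above), and the wrapper name and arguments here are illustrative:

	static void invalidate_guest_icache(unsigned long va, unsigned long size)
	{
		if (icache_is_aliasing()) {
			/* VIPT I-cache: VA-based invalidation can miss aliases, nuke it all */
			icache_inval_all_pou();
		} else {
			/* Non-aliasing: invalidate just the written range to the PoU */
			icache_inval_pou(va, va + size);
		}
	}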
271 * We are not in the kvm->srcu critical section most of the time, so we take
278 int srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_read_guest_lock()
281 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_read_guest_lock()
289 int srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_write_guest_lock()
292 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_write_guest_lock()
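Both guest-access wrappers follow the pattern the comment describes: enter the kvm->srcu read side just around the access and drop it again immediately. A reconstruction of the read-side wrapper, assuming the body is nothing more than the plain accessor call (the write side is the same shape around kvm_write_guest()):

	static inline int kvm_read_guest_lock(struct kvm *kvm,
					      gpa_t gpa, void *data, unsigned long len)
	{
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		int ret = kvm_read_guest(kvm, gpa, data, len);

		srcu_read_unlock(&kvm->srcu, srcu_idx);

		return ret;
	}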
306 struct kvm_vmid *vmid = &mmu->vmid; in kvm_get_vttbr()
310 baddr = mmu->pgd_phys; in kvm_get_vttbr()
311 vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT; in kvm_get_vttbr()
323 write_sysreg(mmu->vtcr, vtcr_el2); in __load_stage2()
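kvm_get_vttbr() packs the stage-2 PGD base and the VMID into a single VTTBR_EL2 value, which __load_stage2() then writes alongside the cached VTCR. A hedged sketch of the composition; the CnP handling and the omission of any VMID-width masking are assumptions based only on the fields visible above:

	static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
	{
		struct kvm_vmid *vmid = &mmu->vmid;
		u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;	/* assumed */
		u64 baddr = mmu->pgd_phys;
		u64 vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;

		return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
	}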
336 return container_of(mmu->arch, struct kvm, arch); in kvm_s2_mmu_to_kvm()
347 return !(mmu->tlb_vttbr & VTTBR_CNP_BIT); in kvm_s2_mmu_valid()
356 return &kvm->arch.mmu != mmu; in kvm_is_nested_s2_mmu()
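These helpers tie a stage-2 MMU back to its VM: container_of() walks from the MMU's pointer into kvm->arch back to the enclosing struct kvm, kvm_s2_mmu_valid() appears to reuse the CnP bit in the cached VTTBR as an "unused shadow MMU" marker, and a nested MMU is simply any stage-2 MMU other than the VM's canonical kvm->arch.mmu. A small usage sketch; the helper name is illustrative and vcpu->arch.hw_mmu is the MMU the vCPU currently runs on:

	static bool vcpu_uses_shadow_s2(struct kvm_vcpu *vcpu)
	{
		struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
		struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);

		return kvm_is_nested_s2_mmu(kvm, mmu);	/* true when running a nested guest's stage-2 */
	}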
362 write_lock(&kvm->mmu_lock); in kvm_fault_lock()
364 read_lock(&kvm->mmu_lock); in kvm_fault_lock()
370 write_unlock(&kvm->mmu_lock); in kvm_fault_unlock()
372 read_unlock(&kvm->mmu_lock); in kvm_fault_unlock()
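The fault-path lock wrappers pick the lock mode at runtime: stage-2 faults are normally handled with mmu_lock held for read so they can proceed in parallel, and the lock is taken for write only when the runtime check says otherwise. A hedged reconstruction of the pair, assuming the condition is is_protected_kvm_enabled():

	static inline void kvm_fault_lock(struct kvm *kvm)
	{
		if (is_protected_kvm_enabled())		/* assumed condition */
			write_lock(&kvm->mmu_lock);
		else
			read_lock(&kvm->mmu_lock);
	}

	static inline void kvm_fault_unlock(struct kvm *kvm)
	{
		if (is_protected_kvm_enabled())		/* assumed condition */
			write_unlock(&kvm->mmu_lock);
		else
			read_unlock(&kvm->mmu_lock);
	}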
379 * VM_PFNMAP VMAs which may not have a kernel direct mapping to a
385 * are thus necessary to support cacheable S2 mapping of VM_PFNMAP.
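The closing comment gives the constraint for cacheable stage-2 mappings of VM_PFNMAP memory: KVM cannot issue cache maintenance (CMOs) on pages that may lack a kernel virtual alias, so hardware features that make CMOs unnecessary have to be present instead. A hypothetical sketch of such a gate; the helper name is invented and the exact capability set is an assumption, FEAT_S2FWB being the feature that forces cacheable stage-2 attributes without CMOs:

	/* Hypothetical gate implied by the comment above. */
	static inline bool stage2_cacheable_pfnmap_supported(void)
	{
		return cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
	}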