Lines Matching +full:reg +full:- +full:space

1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2012,2013 - ARM Ltd
26 * runtime VA space, at the same time.
28 * Given that the kernel uses VA_BITS for its entire address space,
29 * and that half of that space (VA_BITS - 1) is used for the linear
30 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
32 * The main question is "Within the VA_BITS space, does EL2 use the
33 * top or the bottom half of that space to shadow the kernel's linear
41 * if (T & BIT(VA_BITS - 1))
44 * HYP_VA_MIN = 1 << (VA_BITS - 1)
45 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
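
The fragments at lines 41-45 above come from the comment that decides which half of the VA_BITS space EL2 gets to use, based on where the idmap page T lives. A minimal standalone C sketch of that selection, reconstructed from the visible fragments (the "take the other half" direction is an assumption filled in from context, and VA_BITS is just an example value):

    #include <stdint.h>
    #include <stdio.h>

    #define VA_BITS 48ULL   /* example configuration, not taken from this file */

    /*
     * Given T (the physical address of the idmap page), pick the half of
     * the VA_BITS space that EL2 will use: if T sits in the top half, EL2
     * takes the bottom half, and vice versa (assumption based on the
     * surrounding comment fragments).
     */
    static void pick_hyp_range(uint64_t T, uint64_t *min, uint64_t *max)
    {
        if (T & (1ULL << (VA_BITS - 1)))
            *min = 0;                        /* idmap in upper half */
        else
            *min = 1ULL << (VA_BITS - 1);    /* idmap in lower half */
        *max = *min + (1ULL << (VA_BITS - 1)) - 1;
    }

    int main(void)
    {
        uint64_t min, max;

        pick_hyp_range(0x40210000ULL, &min, &max);
        printf("HYP_VA_MIN = %#llx, HYP_VA_MAX = %#llx\n",
               (unsigned long long)min, (unsigned long long)max);
        return 0;
    }
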
58 * reg: hypervisor address to be converted in place
61 .macro hyp_pa reg, tmp
63 add \reg, \reg, \tmp
68 * reg: hypervisor address to be converted in place
72 * the instructions below are only there to reserve the space and
76 .macro hyp_kimg_va reg, tmp
77 /* Convert hyp VA -> PA. */
78 hyp_pa \reg, \tmp
88 /* Convert PA -> kimg VA. */
89 add \reg, \reg, \tmp
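
Lines 61-89 belong to two assembly macros that convert a hypervisor address in place: hyp_pa adds an offset to turn a hyp VA into a PA, and hyp_kimg_va adds a second, runtime-patched offset on top of that to reach the kernel-image VA (which is why the comment at line 72 says the instructions only reserve space). A C-level sketch of the same two steps; the offset parameter names are chosen for illustration and stand in for whatever values the macros load into \tmp:

    #include <stdint.h>

    /*
     * Illustration only: the real code does this in assembly, with the
     * second offset patched into the instruction stream at runtime.
     */
    static inline uint64_t sketch_hyp_pa(uint64_t hyp_va, int64_t pa_offset)
    {
        return hyp_va + pa_offset;            /* hyp VA -> PA */
    }

    static inline uint64_t sketch_hyp_kimg_va(uint64_t hyp_va, int64_t pa_offset,
                                              int64_t kimg_offset)
    {
        uint64_t pa = sketch_hyp_pa(hyp_va, pa_offset);

        return pa + kimg_offset;              /* PA -> kimg VA */
    }
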
113 * Can be called from hyp or non-hyp context.
116 * the instructions below are only there to reserve the space and
124 * context. When called from a VHE non-hyp context, kvm_update_va_mask() will in __kern_hyp_va()
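
Lines 113-124 describe __kern_hyp_va(): the instruction sequence in the source only reserves space, and kvm_update_va_mask() rewrites it once the hyp VA layout is known. Functionally the patched sequence amounts to keeping the low bits of the kernel VA and substituting the hyp tag for the upper bits; a hedged C approximation follows, where both the mask and the tag are placeholders for values the kernel computes at boot, not what the patching literally emits:

    #include <stdint.h>

    /*
     * Approximation of the effect of the patched __kern_hyp_va() sequence:
     * keep the low bits of the kernel VA and replace the upper bits with
     * the hyp tag. Both parameters are placeholders.
     */
    static inline uint64_t sketch_kern_hyp_va(uint64_t kern_va,
                                              uint64_t va_mask, uint64_t tag)
    {
        return (kern_va & va_mask) | tag;
    }
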
145 * We currently support using a VM-specified IPA size. For backward
150 #define kvm_phys_shift(mmu) VTCR_EL2_IPA((mmu)->vtcr)
152 #define kvm_phys_mask(mmu) (kvm_phys_size(mmu) - _AC(1, ULL))
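
Lines 150-152 derive the guest physical address space geometry from the IPA size encoded in the VM's VTCR: a shift, a size (1 << shift, from the kvm_phys_size() definition that falls between these two matches), and a mask (size - 1). A standalone sketch of that derivation with an example 40-bit IPA:

    #include <stdint.h>
    #include <stdio.h>

    /* Derive size and mask from an IPA shift, mirroring the macros above. */
    static inline uint64_t phys_size(unsigned int shift) { return 1ULL << shift; }
    static inline uint64_t phys_mask(unsigned int shift) { return phys_size(shift) - 1; }

    int main(void)
    {
        unsigned int shift = 40;    /* example: 40-bit IPA space */

        printf("size = %#llx, mask = %#llx\n",
               (unsigned long long)phys_size(shift),
               (unsigned long long)phys_mask(shift));
        return 0;
    }
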
192 int idx = slot - (slot != HYP_VECTOR_DIRECT); in __kvm_vector_slot2addr()
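
Line 192 uses a compact idiom: subtracting the result of a comparison (0 or 1) collapses every slot other than HYP_VECTOR_DIRECT down by one index, so, assuming HYP_VECTOR_DIRECT is the first enumerator, the direct slot and the first non-direct slot both land on index 0. A self-contained illustration of that mapping, with enum values invented for the example:

    #include <stdio.h>

    /* Example enum; only the idiom matters, not these exact names or values. */
    enum slot { DIRECT = 0, SPECTRE_A = 1, SPECTRE_B = 2 };

    int main(void)
    {
        for (int slot = DIRECT; slot <= SPECTRE_B; slot++) {
            int idx = slot - (slot != DIRECT);  /* (slot != DIRECT) is 0 or 1 */
            printf("slot %d -> idx %d\n", slot, idx);
        }
        return 0;
    }
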
205 int reg; in vcpu_has_cache_enabled() local
208 reg = SCTLR_EL2; in vcpu_has_cache_enabled()
210 reg = SCTLR_EL1; in vcpu_has_cache_enabled()
212 return (vcpu_read_sys_reg(vcpu, reg) & cache_bits) == cache_bits; in vcpu_has_cache_enabled()
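
Lines 205-212 test whether the guest's caches are on: pick SCTLR_EL2 or SCTLR_EL1 depending on the context the vCPU runs in, then require that all of the cache-related bits are set. A hedged sketch of the bit test, with the bit positions taken from the architectural SCTLR layout (M, the MMU enable, is bit 0; C, the cache enable, is bit 2) rather than from the matched lines:

    #include <stdint.h>
    #include <stdbool.h>

    #define SCTLR_M  (1ULL << 0)   /* MMU enable */
    #define SCTLR_C  (1ULL << 2)   /* data/unified cache enable */

    /* Both bits must be set for "caches enabled" to hold. */
    static inline bool sketch_cache_enabled(uint64_t sctlr)
    {
        const uint64_t cache_bits = SCTLR_M | SCTLR_C;

        return (sctlr & cache_bits) == cache_bits;
    }
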
249 * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the in __invalidate_icache_guest_page()
264 int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); in kvm_get_vmid_bits() local
266 return get_vmid_bits(reg); in kvm_get_vmid_bits()
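
Lines 264-266 read the sanitised ID_AA64MMFR1_EL1 value and extract the supported VMID width from it. Architecturally the VMIDBits field sits at bits [7:4] and encodes 8-bit VMIDs as 0 and 16-bit VMIDs as 2; a sketch of that decode (field position and encodings come from the architecture, not from the matched lines):

    #include <stdint.h>

    /* Decode ID_AA64MMFR1_EL1.VMIDBits (bits [7:4]): 0 -> 8 bits, 2 -> 16 bits. */
    static inline unsigned int sketch_get_vmid_bits(uint64_t id_aa64mmfr1)
    {
        unsigned int field = (id_aa64mmfr1 >> 4) & 0xf;

        return (field == 2) ? 16 : 8;
    }
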
270 * We are not in the kvm->srcu critical section most of the time, so we take
277 int srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_read_guest_lock()
280 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_read_guest_lock()
288 int srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_write_guest_lock()
291 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_write_guest_lock()
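
Lines 277-291 show the same shape twice: take the kvm->srcu read lock, do the guest memory access, drop the lock with the index returned by the lock call, and hand back the access's return value. A generic standalone illustration of that lock/access/unlock wrapper; the fake_* functions are stand-ins, not the kernel's SRCU or guest-access API:

    #include <stdio.h>

    /* Stand-ins for srcu_read_lock()/srcu_read_unlock() and the guest access. */
    static int fake_read_lock(void)        { return 42; /* token handed back later */ }
    static void fake_read_unlock(int idx)  { (void)idx; }
    static int fake_read_guest(void *data, unsigned long len)
    {
        (void)data; (void)len;
        return 0;
    }

    /* Mirrors the pattern above: lock, access, unlock, return the result. */
    static int sketch_read_guest_locked(void *data, unsigned long len)
    {
        int idx = fake_read_lock();
        int ret = fake_read_guest(data, len);

        fake_read_unlock(idx);
        return ret;
    }

    int main(void)
    {
        char buf[16];

        printf("ret = %d\n", sketch_read_guest_locked(buf, sizeof(buf)));
        return 0;
    }
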
305 struct kvm_vmid *vmid = &mmu->vmid; in kvm_get_vttbr()
309 baddr = mmu->pgd_phys; in kvm_get_vttbr()
310 vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT; in kvm_get_vttbr()
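
Lines 305-310 build the VTTBR value for a stage-2 MMU out of the pgd physical address and the VMID shifted into its field; VTTBR also carries a Common-not-Private hint in bit 0 (the VTTBR_CNP_BIT that appears at line 346), included here as an optional flag. A sketch of that composition; the shift of 48 and CnP at bit 0 are architectural assumptions, not values read from the matched lines:

    #include <stdint.h>

    #define SKETCH_VTTBR_VMID_SHIFT  48              /* VMID field in the upper bits */
    #define SKETCH_VTTBR_CNP_BIT     (1ULL << 0)     /* Common-not-Private hint */

    /* Compose a VTTBR-like value from the pgd PA, the VMID and the CnP hint. */
    static inline uint64_t sketch_make_vttbr(uint64_t pgd_phys, uint64_t vmid,
                                             int supports_cnp)
    {
        uint64_t vmid_field = vmid << SKETCH_VTTBR_VMID_SHIFT;
        uint64_t cnp = supports_cnp ? SKETCH_VTTBR_CNP_BIT : 0;

        return pgd_phys | vmid_field | cnp;
    }
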
322 write_sysreg(mmu->vtcr, vtcr_el2); in __load_stage2()
335 return container_of(mmu->arch, struct kvm, arch); in kvm_s2_mmu_to_kvm()
346 return !(mmu->tlb_vttbr & VTTBR_CNP_BIT); in kvm_s2_mmu_valid()
355 return &kvm->arch.mmu != mmu; in kvm_is_nested_s2_mmu()
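
Line 335 recovers the enclosing struct kvm from a pointer to its embedded arch member via container_of(), which simply subtracts the member's offset from the member pointer. A standalone illustration of the idiom with made-up structure names:

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal container_of(): subtract the member offset from the member pointer. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inner { int x; };
    struct outer { int id; struct inner in; };

    int main(void)
    {
        struct outer o = { .id = 7 };
        struct inner *ip = &o.in;
        struct outer *op = container_of(ip, struct outer, in);

        printf("recovered id = %d\n", op->id);
        return 0;
    }
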
361 write_lock(&kvm->mmu_lock); in kvm_fault_lock()
363 read_lock(&kvm->mmu_lock); in kvm_fault_lock()
369 write_unlock(&kvm->mmu_lock); in kvm_fault_unlock()
371 read_unlock(&kvm->mmu_lock); in kvm_fault_unlock()
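
Lines 361-371 show kvm_fault_lock()/kvm_fault_unlock() taking either the write side or the read side of kvm->mmu_lock; the condition that selects between them is not part of the matched lines. A generic sketch of the paired helper pattern using a POSIX rwlock, with the selection left as an abstract flag:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* The real condition is not visible in the matches above; a flag stands in. */
    static void sketch_fault_lock(bool need_exclusive)
    {
        if (need_exclusive)
            pthread_rwlock_wrlock(&mmu_lock);
        else
            pthread_rwlock_rdlock(&mmu_lock);
    }

    static void sketch_fault_unlock(void)
    {
        /* A pthreads rwlock releases read and write holds the same way. */
        pthread_rwlock_unlock(&mmu_lock);
    }
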