Lines Matching +full:el3 +full:-
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2017 - Columbia University and Linaro Ltd.
28 /* -1 when not mapped on a CPU */
39 * Ratio of live shadow S2 MMU per vcpu. This is a trade-off between
48 kvm->arch.nested_mmus = NULL; in kvm_init_nested()
49 kvm->arch.nested_mmus_size = 0; in kvm_init_nested()
50 atomic_set(&kvm->arch.vncr_map_count, 0); in kvm_init_nested()
70 struct kvm *kvm = vcpu->kvm; in kvm_vcpu_init_nested()
74 if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features) && in kvm_vcpu_init_nested()
76 return -EINVAL; in kvm_vcpu_init_nested()
78 if (!vcpu->arch.ctxt.vncr_array) in kvm_vcpu_init_nested()
79 vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL_ACCOUNT | in kvm_vcpu_init_nested()
82 if (!vcpu->arch.ctxt.vncr_array) in kvm_vcpu_init_nested()
83 return -ENOMEM; in kvm_vcpu_init_nested()
91 num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU; in kvm_vcpu_init_nested()
92 tmp = kvrealloc(kvm->arch.nested_mmus, in kvm_vcpu_init_nested()
93 size_mul(sizeof(*kvm->arch.nested_mmus), num_mmus), in kvm_vcpu_init_nested()
96 return -ENOMEM; in kvm_vcpu_init_nested()
98 swap(kvm->arch.nested_mmus, tmp); in kvm_vcpu_init_nested()
101 * If we went through a reallocation, adjust the MMU back-pointers in in kvm_vcpu_init_nested()
104 if (kvm->arch.nested_mmus != tmp) in kvm_vcpu_init_nested()
105 for (int i = 0; i < kvm->arch.nested_mmus_size; i++) in kvm_vcpu_init_nested()
106 kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i]; in kvm_vcpu_init_nested()
108 for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++) in kvm_vcpu_init_nested()
109 ret = init_nested_s2_mmu(kvm, &kvm->arch.nested_mmus[i]); in kvm_vcpu_init_nested()
112 for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++) in kvm_vcpu_init_nested()
113 kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]); in kvm_vcpu_init_nested()
115 free_page((unsigned long)vcpu->arch.ctxt.vncr_array); in kvm_vcpu_init_nested()
116 vcpu->arch.ctxt.vncr_array = NULL; in kvm_vcpu_init_nested()
121 kvm->arch.nested_mmus_size = num_mmus; in kvm_vcpu_init_nested()
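Editor's note: the fragment above grows the nested_mmus array with kvrealloc() and, when the base address moves, refreshes the pgt->mmu back-pointers stored inside the elements. A minimal user-space sketch of the same pattern, using plain realloc() and invented names (struct mmu_slot, grow_slots) purely for illustration; the kernel version additionally uses an overflow-checked size_mul():

#include <stdlib.h>

/* Illustrative only: each slot carries a pointer back at itself, standing
 * in for the pgt->mmu back-pointer seen in the listing. */
struct mmu_slot {
	struct mmu_slot *self;
	int data;
};

static int grow_slots(struct mmu_slot **slots, int *size, int new_size)
{
	struct mmu_slot *old = *slots;
	struct mmu_slot *tmp;

	tmp = realloc(old, new_size * sizeof(*tmp));
	if (!tmp)
		return -1;

	*slots = tmp;

	/* If the base address moved, every stored back-pointer is stale. */
	if (tmp != old)
		for (int i = 0; i < *size; i++)
			tmp[i].self = &tmp[i];

	/* Initialise only the newly appended slots (realloc() does not zero). */
	for (int i = *size; i < new_size; i++) {
		tmp[i].self = &tmp[i];
		tmp[i].data = 0;
	}

	*size = new_size;
	return 0;
}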
153 return 64 - wi->t0sz; in get_ia_size()
164 switch (BIT(wi->pgshift)) { in check_base_s2_limits()
167 return -EFAULT; in check_base_s2_limits()
171 return -EFAULT; in check_base_s2_limits()
175 return -EFAULT; in check_base_s2_limits()
181 return -EFAULT; in check_base_s2_limits()
184 start_size = input_size - ((3 - level) * stride + wi->pgshift); in check_base_s2_limits()
186 return -EFAULT; in check_base_s2_limits()
194 unsigned int output_size = wi->max_oa_bits; in check_output_size()
197 return -1; in check_output_size()
203 * This is essentially a C-version of the pseudo code from the ARM ARM
207 * Must be called with the kvm->srcu read lock held
219 switch (BIT(wi->pgshift)) { in walk_nested_s2_pgd()
223 level = 3 - wi->sl; in walk_nested_s2_pgd()
227 level = 2 - wi->sl; in walk_nested_s2_pgd()
232 stride = wi->pgshift - 3; in walk_nested_s2_pgd()
235 return -EFAULT; in walk_nested_s2_pgd()
241 base_lower_bound = 3 + input_size - ((3 - level) * stride + in walk_nested_s2_pgd()
242 wi->pgshift); in walk_nested_s2_pgd()
243 base_addr = wi->baddr & GENMASK_ULL(47, base_lower_bound); in walk_nested_s2_pgd()
246 out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ); in walk_nested_s2_pgd()
250 addr_top = input_size - 1; in walk_nested_s2_pgd()
255 addr_bottom = (3 - level) * stride + wi->pgshift; in walk_nested_s2_pgd()
257 >> (addr_bottom - 3); in walk_nested_s2_pgd()
260 ret = wi->read_desc(paddr, &desc, wi->data); in walk_nested_s2_pgd()
268 if (wi->be) in walk_nested_s2_pgd()
275 out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT); in walk_nested_s2_pgd()
276 out->desc = desc; in walk_nested_s2_pgd()
285 out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ); in walk_nested_s2_pgd()
286 out->desc = desc; in walk_nested_s2_pgd()
290 base_addr = desc & GENMASK_ULL(47, wi->pgshift); in walk_nested_s2_pgd()
293 addr_top = addr_bottom - 1; in walk_nested_s2_pgd()
297 out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT); in walk_nested_s2_pgd()
298 out->desc = desc; in walk_nested_s2_pgd()
303 out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ); in walk_nested_s2_pgd()
304 out->desc = desc; in walk_nested_s2_pgd()
309 out->esr = compute_fsc(level, ESR_ELx_FSC_ACCESS); in walk_nested_s2_pgd()
310 out->desc = desc; in walk_nested_s2_pgd()
318 (ipa & GENMASK_ULL(addr_bottom - 1, 0)); in walk_nested_s2_pgd()
319 out->output = paddr; in walk_nested_s2_pgd()
320 out->block_size = 1UL << ((3 - level) * stride + wi->pgshift); in walk_nested_s2_pgd()
321 out->readable = desc & (0b01 << 6); in walk_nested_s2_pgd()
322 out->writable = desc & (0b10 << 6); in walk_nested_s2_pgd()
323 out->level = level; in walk_nested_s2_pgd()
324 out->desc = desc; in walk_nested_s2_pgd()
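Editor's note: the index arithmetic in the walker above is easy to lose in the fragments. A small standalone sketch of the per-level descriptor-offset computation, derived directly from the lines shown (stride = pgshift - 3, addr_bottom = (3 - level) * stride + pgshift, offset = index bits of the IPA shifted down to bit 3, i.e. index * 8 since descriptors are 8 bytes); addr_top starts at input_size - 1 and becomes addr_bottom - 1 for the next level:

#include <stdint.h>

/* Illustrative only: byte offset of the level's descriptor within its table. */
static uint64_t desc_offset(uint64_t ipa, int level, int pgshift, int addr_top)
{
	int stride = pgshift - 3;                        /* index bits per level */
	int addr_bottom = (3 - level) * stride + pgshift; /* low bit of this level's index */
	uint64_t mask = (~0ULL >> (63 - addr_top)) & ~((1ULL << addr_bottom) - 1);

	/* index * 8 == byte offset of the descriptor within the table */
	return (ipa & mask) >> (addr_bottom - 3);
}

/* e.g. with a 4KB granule (pgshift 12) and addr_top 20, a level-3 lookup
 * uses IPA bits [20:12] as the index. */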
332 return kvm_read_guest(vcpu->kvm, pa, desc, sizeof(*desc)); in read_guest_s2_desc()
337 wi->t0sz = vtcr & TCR_EL2_T0SZ_MASK; in vtcr_to_walk_info()
341 wi->pgshift = 12; break; in vtcr_to_walk_info()
343 wi->pgshift = 14; break; in vtcr_to_walk_info()
346 wi->pgshift = 16; break; in vtcr_to_walk_info()
349 wi->sl = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); in vtcr_to_walk_info()
350 /* Global limit for now, should eventually be per-VM */ in vtcr_to_walk_info()
351 wi->max_oa_bits = min(get_kvm_ipa_limit(), in vtcr_to_walk_info()
362 result->esr = 0; in kvm_walk_nested_s2()
377 result->esr |= (kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC); in kvm_walk_nested_s2()
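Editor's note: vtcr_to_walk_info() above decodes VTCR_EL2 into a page shift (12, 14 or 16) and an input-address size; get_ia_size() earlier in the listing is simply 64 - T0SZ. A trivial standalone restatement, deliberately taking the granule as an enum rather than reproducing the raw TG0 encodings:

#include <stdint.h>

enum granule { GRANULE_4K, GRANULE_16K, GRANULE_64K };

/* Illustrative only: granule size to page shift, as in the listing. */
static int granule_to_pgshift(enum granule g)
{
	switch (g) {
	case GRANULE_4K:  return 12;
	case GRANULE_16K: return 14;
	case GRANULE_64K: return 16;
	}
	return -1;
}

/* get_ia_size() in the listing: input address size in bits is 64 - T0SZ. */
static int input_addr_bits(int t0sz)
{
	return 64 - t0sz;
}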
469 u64 tmp, sz = 0, vtcr = mmu->tlb_vtcr; in get_guest_mapping_ttl()
473 lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(mmu)->mmu_lock); in get_guest_mapping_ttl()
515 tmp &= ~(sz - 1); in get_guest_mapping_ttl()
516 if (kvm_pgtable_get_leaf(mmu->pgt, tmp, &pte, NULL)) in get_guest_mapping_ttl()
555 switch (mmu->tlb_vtcr & VTCR_EL2_TG0_MASK) { in compute_tlb_inval_range()
580 * - S2 being enabled or not, hence differing by the HCR_EL2.VM bit
582 * - Multiple vcpus using private S2s (huh huh...), hence differing by the
585 * - A combination of the above...
587 * We can always identify which MMU context to pick at run-time. However,
597 write_lock(&kvm->mmu_lock); in kvm_s2_mmu_iterate_by_vmid()
599 for (int i = 0; i < kvm->arch.nested_mmus_size; i++) { in kvm_s2_mmu_iterate_by_vmid()
600 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i]; in kvm_s2_mmu_iterate_by_vmid()
605 if (vmid == get_vmid(mmu->tlb_vttbr)) in kvm_s2_mmu_iterate_by_vmid()
609 write_unlock(&kvm->mmu_lock); in kvm_s2_mmu_iterate_by_vmid()
614 struct kvm *kvm = vcpu->kvm; in lookup_s2_mmu()
618 lockdep_assert_held_write(&kvm->mmu_lock); in lookup_s2_mmu()
632 * - either S2 is enabled in the guest, and we need a context that is in lookup_s2_mmu()
633 * S2-enabled and matches the full VTTBR (VMID+BADDR) and VTCR, in lookup_s2_mmu()
637 * - or S2 is disabled, and we need a context that is S2-disabled in lookup_s2_mmu()
641 for (int i = 0; i < kvm->arch.nested_mmus_size; i++) { in lookup_s2_mmu()
642 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i]; in lookup_s2_mmu()
648 mmu->nested_stage2_enabled && in lookup_s2_mmu()
649 vttbr == mmu->tlb_vttbr && in lookup_s2_mmu()
650 vtcr == mmu->tlb_vtcr) in lookup_s2_mmu()
654 !mmu->nested_stage2_enabled && in lookup_s2_mmu()
655 get_vmid(vttbr) == get_vmid(mmu->tlb_vttbr)) in lookup_s2_mmu()
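Editor's note: the matching rule in lookup_s2_mmu() can be read off the fragments above: with stage-2 enabled in the guest, a context must match the full VTTBR (VMID + BADDR) and VTCR; with stage-2 disabled, only the VMID matters. A standalone predicate expressing just the conditions visible in the listing (the real loop also skips contexts that fail checks not shown here); struct s2_ctx and s2_ctx_matches are invented names:

#include <stdbool.h>
#include <stdint.h>

struct s2_ctx {
	bool	 s2_enabled;
	uint64_t vttbr;
	uint64_t vtcr;
	uint16_t vmid;	/* VMID field extracted from the context's VTTBR */
};

static bool s2_ctx_matches(const struct s2_ctx *ctx, bool guest_s2_enabled,
			   uint64_t vttbr, uint64_t vtcr, uint16_t vmid)
{
	if (guest_s2_enabled)
		/* S2 on: the full VTTBR (VMID + BADDR) and VTCR must match. */
		return ctx->s2_enabled &&
		       ctx->vttbr == vttbr &&
		       ctx->vtcr == vtcr;

	/* S2 off: only the VMID is relevant. */
	return !ctx->s2_enabled && ctx->vmid == vmid;
}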
663 struct kvm *kvm = vcpu->kvm; in get_s2_mmu_nested()
667 lockdep_assert_held_write(&vcpu->kvm->mmu_lock); in get_s2_mmu_nested()
678 for (i = kvm->arch.nested_mmus_next; in get_s2_mmu_nested()
679 i < (kvm->arch.nested_mmus_size + kvm->arch.nested_mmus_next); in get_s2_mmu_nested()
681 s2_mmu = &kvm->arch.nested_mmus[i % kvm->arch.nested_mmus_size]; in get_s2_mmu_nested()
683 if (atomic_read(&s2_mmu->refcnt) == 0) in get_s2_mmu_nested()
686 BUG_ON(atomic_read(&s2_mmu->refcnt)); /* We have struct MMUs to spare */ in get_s2_mmu_nested()
689 kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size; in get_s2_mmu_nested()
693 s2_mmu->pending_unmap = true; in get_s2_mmu_nested()
703 s2_mmu->tlb_vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2) & ~VTTBR_CNP_BIT; in get_s2_mmu_nested()
704 s2_mmu->tlb_vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2); in get_s2_mmu_nested()
705 s2_mmu->nested_stage2_enabled = vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM; in get_s2_mmu_nested()
708 atomic_inc(&s2_mmu->refcnt); in get_s2_mmu_nested()
716 if (s2_mmu->pending_unmap) in get_s2_mmu_nested()
725 mmu->tlb_vttbr = VTTBR_CNP_BIT; in kvm_init_nested_s2_mmu()
726 mmu->nested_stage2_enabled = false; in kvm_init_nested_s2_mmu()
727 atomic_set(&mmu->refcnt, 0); in kvm_init_nested_s2_mmu()
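Editor's note: get_s2_mmu_nested() above picks a victim context with a round-robin scan starting at nested_mmus_next, wrapping with a modulo, and requiring a zero refcount (the BUG_ON documents that the array is sized so a free slot always exists). A standalone sketch of the same scan over a plain array of reference counts; pick_free_slot is an invented name and *next is assumed to stay below size:

/* Illustrative only: round-robin search for an unused slot. */
static int pick_free_slot(const int *refcnt, int size, int *next)
{
	int i;

	for (i = *next; i < size + *next; i++)
		if (refcnt[i % size] == 0)
			break;

	/* The listing BUG()s here instead: slots are sized to never run out. */
	if (refcnt[i % size] != 0)
		return -1;

	*next = (i + 1) % size;
	return i % size;
}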
737 if (!vcpu->arch.hw_mmu) in kvm_vcpu_load_hw_mmu()
738 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_vcpu_load_hw_mmu()
740 if (!vcpu->arch.hw_mmu) { in kvm_vcpu_load_hw_mmu()
741 scoped_guard(write_lock, &vcpu->kvm->mmu_lock) in kvm_vcpu_load_hw_mmu()
742 vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu); in kvm_vcpu_load_hw_mmu()
754 BUG_ON(vcpu->arch.vncr_tlb->cpu != smp_processor_id()); in kvm_vcpu_put_hw_mmu()
757 clear_fixmap(vncr_fixmap(vcpu->arch.vncr_tlb->cpu)); in kvm_vcpu_put_hw_mmu()
758 vcpu->arch.vncr_tlb->cpu = -1; in kvm_vcpu_put_hw_mmu()
760 atomic_dec(&vcpu->kvm->arch.vncr_map_count); in kvm_vcpu_put_hw_mmu()
764 * Keep a reference on the associated stage-2 MMU if the vCPU is in kvm_vcpu_put_hw_mmu()
768 if (vcpu->scheduled_out && !vcpu_get_flag(vcpu, IN_WFI)) in kvm_vcpu_put_hw_mmu()
771 if (kvm_is_nested_s2_mmu(vcpu->kvm, vcpu->arch.hw_mmu)) in kvm_vcpu_put_hw_mmu()
772 atomic_dec(&vcpu->arch.hw_mmu->refcnt); in kvm_vcpu_put_hw_mmu()
774 vcpu->arch.hw_mmu = NULL; in kvm_vcpu_put_hw_mmu()
778 * Returns non-zero if permission fault is handled by injecting it to the next
785 trans->esr = 0; in kvm_s2_handle_perm_fault()
795 forward_fault = ((write_fault && !trans->writable) || in kvm_s2_handle_perm_fault()
796 (!write_fault && !trans->readable)); in kvm_s2_handle_perm_fault()
800 trans->esr = esr_s2_fault(vcpu, trans->level, ESR_ELx_FSC_PERM); in kvm_s2_handle_perm_fault()
807 vcpu_write_sys_reg(vcpu, vcpu->arch.fault.far_el2, FAR_EL2); in kvm_inject_s2_fault()
808 vcpu_write_sys_reg(vcpu, vcpu->arch.fault.hpfar_el2, HPFAR_EL2); in kvm_inject_s2_fault()
815 vt->valid = false; in invalidate_vncr()
816 if (vt->cpu != -1) in invalidate_vncr()
817 clear_fixmap(vncr_fixmap(vt->cpu)); in invalidate_vncr()
825 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_invalidate_vncr_ipa()
831 struct vncr_tlb *vt = vcpu->arch.vncr_tlb; in kvm_invalidate_vncr_ipa()
835 * Careful here: We end up here from an MMU notifier, in kvm_invalidate_vncr_ipa()
837 * yet, without the pseudo-TLB being allocated. in kvm_invalidate_vncr_ipa()
845 if (!vt->valid) in kvm_invalidate_vncr_ipa()
848 ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift, in kvm_invalidate_vncr_ipa()
849 vt->wr.level)); in kvm_invalidate_vncr_ipa()
850 ipa_start = vt->wr.pa & ~(ipa_size - 1); in kvm_invalidate_vncr_ipa()
879 lockdep_assert_held_write(&kvm->mmu_lock); in invalidate_vncr_va()
882 struct vncr_tlb *vt = vcpu->arch.vncr_tlb; in invalidate_vncr_va()
885 if (!vt->valid) in invalidate_vncr_va()
888 va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift, in invalidate_vncr_va()
889 vt->wr.level)); in invalidate_vncr_va()
890 va_start = vt->gva & ~(va_size - 1); in invalidate_vncr_va()
893 switch (scope->type) { in invalidate_vncr_va()
898 if (va_end <= scope->va || in invalidate_vncr_va()
899 va_start >= (scope->va + scope->size)) in invalidate_vncr_va()
901 if (vt->wr.nG && vt->wr.asid != scope->asid) in invalidate_vncr_va()
906 if (va_end <= scope->va || in invalidate_vncr_va()
907 va_start >= (scope->va + scope->size)) in invalidate_vncr_va()
912 if (!vt->wr.nG || vt->wr.asid != scope->asid) in invalidate_vncr_va()
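Editor's note: both invalidation helpers above (kvm_invalidate_vncr_ipa() and invalidate_vncr_va()) reduce to the same shape: round the cached translation's address down to its block size, then do a half-open interval overlap test against the range being invalidated. A standalone sketch of just that test; ranges_overlap and entry_start are invented names:

#include <stdbool.h>
#include <stdint.h>

static bool ranges_overlap(uint64_t start_a, uint64_t size_a,
			   uint64_t start_b, uint64_t size_b)
{
	uint64_t end_a = start_a + size_a;
	uint64_t end_b = start_b + size_b;

	/* No overlap when one range ends before the other begins. */
	return !(end_a <= start_b || start_a >= end_b);
}

/* The cached entry's start is its address rounded down to its block size. */
static uint64_t entry_start(uint64_t addr, uint64_t block_size)
{
	return addr & ~(block_size - 1);
}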
939 scope->type = TLBI_ALL; in compute_s1_tlbi_range()
965 scope->type = TLBI_VA; in compute_s1_tlbi_range()
966 scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val)); in compute_s1_tlbi_range()
967 if (!scope->size) in compute_s1_tlbi_range()
968 scope->size = SZ_1G; in compute_s1_tlbi_range()
969 scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1); in compute_s1_tlbi_range()
970 scope->asid = FIELD_GET(TLBIR_ASID_MASK, val); in compute_s1_tlbi_range()
978 scope->type = TLBI_ASID; in compute_s1_tlbi_range()
979 scope->asid = FIELD_GET(TLBIR_ASID_MASK, val); in compute_s1_tlbi_range()
993 scope->type = TLBI_VAA; in compute_s1_tlbi_range()
994 scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val)); in compute_s1_tlbi_range()
995 if (!scope->size) in compute_s1_tlbi_range()
996 scope->size = SZ_1G; in compute_s1_tlbi_range()
997 scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1); in compute_s1_tlbi_range()
1023 scope->type = TLBI_VA; in compute_s1_tlbi_range()
1024 scope->va = decode_range_tlbi(val, &scope->size, &scope->asid); in compute_s1_tlbi_range()
1038 scope->type = TLBI_VAA; in compute_s1_tlbi_range()
1039 scope->va = decode_range_tlbi(val, &scope->size, NULL); in compute_s1_tlbi_range()
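Editor's note: for VA-scoped invalidations, compute_s1_tlbi_range() above normalises the operand into a scope: a size derived from the TTL hint (falling back to 1GiB when TTL gives nothing), the VA aligned down to that size, and an ASID for the non-VAA forms. A standalone sketch under those assumptions; the has_asid flag is an invented stand-in for the listing's TLBI_VA vs TLBI_VAA distinction, and the caller is assumed to have already decoded the VA, TTL size and ASID fields:

#include <stdbool.h>
#include <stdint.h>

#define SZ_1G	(1ULL << 30)

struct s1_tlbi_scope_sketch {
	uint64_t va;
	uint64_t size;
	uint16_t asid;
	bool	 has_asid;
};

static void scope_from_va(struct s1_tlbi_scope_sketch *scope, uint64_t va,
			  uint64_t ttl_size, uint16_t asid, bool has_asid)
{
	/* No usable TTL hint: assume the worst case, a 1GiB region. */
	scope->size = ttl_size ? ttl_size : SZ_1G;
	/* Align the VA down to the size of the region being invalidated. */
	scope->va = va & ~(scope->size - 1);
	scope->asid = asid;
	scope->has_asid = has_asid;
}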
1050 guard(write_lock)(&vcpu->kvm->mmu_lock); in kvm_handle_s1e2_tlbi()
1051 invalidate_vncr_va(vcpu->kvm, &scope); in kvm_handle_s1e2_tlbi()
1058 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_nested_s2_wp()
1060 for (i = 0; i < kvm->arch.nested_mmus_size; i++) { in kvm_nested_s2_wp()
1061 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i]; in kvm_nested_s2_wp()
1067 kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits)); in kvm_nested_s2_wp()
1074 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_nested_s2_unmap()
1076 for (i = 0; i < kvm->arch.nested_mmus_size; i++) { in kvm_nested_s2_unmap()
1077 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i]; in kvm_nested_s2_unmap()
1083 kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits)); in kvm_nested_s2_unmap()
1090 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_nested_s2_flush()
1092 for (i = 0; i < kvm->arch.nested_mmus_size; i++) { in kvm_nested_s2_flush()
1093 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i]; in kvm_nested_s2_flush()
1104 for (i = 0; i < kvm->arch.nested_mmus_size; i++) { in kvm_arch_flush_shadow_all()
1105 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i]; in kvm_arch_flush_shadow_all()
1107 if (!WARN_ON(atomic_read(&mmu->refcnt))) in kvm_arch_flush_shadow_all()
1110 kvfree(kvm->arch.nested_mmus); in kvm_arch_flush_shadow_all()
1111 kvm->arch.nested_mmus = NULL; in kvm_arch_flush_shadow_all()
1112 kvm->arch.nested_mmus_size = 0; in kvm_arch_flush_shadow_all()
1119 * - We introduce an internal representation of a vcpu-private TLB,
1123 * - On translation fault from a nested VNCR access, we create such a TLB.
1127 * - On vcpu_load() in a non-HYP context with HCR_EL2.NV==1, if the above
1132 * - Note that we usually don't do a vcpu_load() on the back of a fault
1137 * - On vcpu_put() in a non-HYP context with HCR_EL2.NV==1, if the TLB was
1141 * - On permission fault, we simply forward the fault to the guest's EL2.
1144 * - On any TLBI for the EL2&0 translation regime, we must find any TLB that
1146 * from the fixmap. Because we need to look at all the vcpu-private TLBs,
1147 * this requires some wide-ranging locking to ensure that nothing races
1151 * - On MMU notifiers, we must invalidate our TLB in a similar way, but
1153 * stage-2 mapping for this page if L1 hasn't accessed it using LD/ST
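Editor's note: the design comment above refers to a vcpu-private pseudo-TLB whose fields only appear piecemeal in the listing (vt->valid, vt->cpu, vt->gva, vt->hpa, vt->wi, vt->wr). The following is a rough reconstruction for orientation only; the real struct vncr_tlb is defined elsewhere in KVM, the field types here are guesses, and the walk-info/walk-result structures are reduced to opaque stand-ins:

#include <stdbool.h>
#include <stdint.h>

/* Opaque stand-ins: the real s1_walk_info/s1_walk_result are KVM types. */
struct s1_walk_info_sketch { int pgshift; /* ... */ };
struct s1_walk_result_sketch {
	uint64_t pa;
	int level;
	bool nG, pr, pw;
	uint16_t asid;
	/* ... */
};

struct vncr_tlb_sketch {
	bool				valid;	/* translation cached and usable */
	int				cpu;	/* CPU owning the fixmap slot, -1 when unmapped */
	uint64_t			gva;	/* guest VA programmed into VNCR_EL2 */
	uint64_t			hpa;	/* host PA backing it */
	struct s1_walk_info_sketch	wi;	/* parameters of the L1 stage-1 walk */
	struct s1_walk_result_sketch	wr;	/* its outcome: pa, level, permissions, ASID */
};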
1159 if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY)) in kvm_vcpu_allocate_vncr_tlb()
1162 vcpu->arch.vncr_tlb = kzalloc(sizeof(*vcpu->arch.vncr_tlb), in kvm_vcpu_allocate_vncr_tlb()
1164 if (!vcpu->arch.vncr_tlb) in kvm_vcpu_allocate_vncr_tlb()
1165 return -ENOMEM; in kvm_vcpu_allocate_vncr_tlb()
1185 vt = vcpu->arch.vncr_tlb; in kvm_translate_vncr()
1195 scoped_guard(write_lock, &vcpu->kvm->mmu_lock) { in kvm_translate_vncr()
1198 vt->wi = (struct s1_walk_info) { in kvm_translate_vncr()
1203 vt->wr = (struct s1_walk_result){}; in kvm_translate_vncr()
1206 guard(srcu)(&vcpu->kvm->srcu); in kvm_translate_vncr()
1210 ret = __kvm_translate_va(vcpu, &vt->wi, &vt->wr, va); in kvm_translate_vncr()
1216 mmu_seq = vcpu->kvm->mmu_invalidate_seq; in kvm_translate_vncr()
1219 gfn = vt->wr.pa >> PAGE_SHIFT; in kvm_translate_vncr()
1220 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_translate_vncr()
1222 return -EFAULT; in kvm_translate_vncr()
1229 return -EFAULT; in kvm_translate_vncr()
1231 ret = kvm_gmem_get_pfn(vcpu->kvm, memslot, gfn, &pfn, &page, NULL); in kvm_translate_vncr()
1233 kvm_prepare_memory_fault_exit(vcpu, vt->wr.pa, PAGE_SIZE, in kvm_translate_vncr()
1239 scoped_guard(write_lock, &vcpu->kvm->mmu_lock) { in kvm_translate_vncr()
1240 if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) in kvm_translate_vncr()
1241 return -EAGAIN; in kvm_translate_vncr()
1243 vt->gva = va; in kvm_translate_vncr()
1244 vt->hpa = pfn << PAGE_SHIFT; in kvm_translate_vncr()
1245 vt->valid = true; in kvm_translate_vncr()
1246 vt->cpu = -1; in kvm_translate_vncr()
1249 kvm_release_faultin_page(vcpu->kvm, page, false, vt->wr.pw); in kvm_translate_vncr()
1252 if (vt->wr.pw) in kvm_translate_vncr()
1253 mark_page_dirty(vcpu->kvm, gfn); in kvm_translate_vncr()
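Editor's note: kvm_translate_vncr() above uses the usual sequence-count retry pattern: snapshot mmu_invalidate_seq, do the expensive translation and page pinning without the MMU lock, then re-check the counter under the write lock and return -EAGAIN if an invalidation raced with it. A standalone sketch of that pattern with trivial stand-ins (invalidate_seq, translate_and_pin, publish_translation are not KVM APIs, and the locking is elided):

#include <errno.h>

static unsigned long invalidate_seq;	/* bumped by the invalidation side */

static int translate_and_pin(void) { return 0; }	/* pretend the walk succeeded */
static void publish_translation(void) { }		/* mark the cached entry valid */

static int cache_translation(void)
{
	/* 1. Snapshot the invalidation sequence counter before the walk. */
	unsigned long seq = invalidate_seq;
	int ret;

	/* 2. Expensive work done without the MMU lock held: walk the guest
	 *    page tables, look up the memslot, pin the backing page. */
	ret = translate_and_pin();
	if (ret)
		return ret;

	/* 3. Under the MMU lock (elided here), re-check the counter: if an
	 *    invalidation ran concurrently, the result is stale -- retry. */
	if (seq != invalidate_seq)
		return -EAGAIN;

	publish_translation();
	return 0;
}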
1260 struct vncr_tlb *vt = vcpu->arch.vncr_tlb; in inject_vncr_perm()
1266 ESR_ELx_FSC_PERM_L(vt->wr.level)); in inject_vncr_perm()
1273 struct vncr_tlb *vt = vcpu->arch.vncr_tlb; in kvm_vncr_tlb_lookup()
1275 lockdep_assert_held_read(&vcpu->kvm->mmu_lock); in kvm_vncr_tlb_lookup()
1277 if (!vt->valid) in kvm_vncr_tlb_lookup()
1280 if (read_vncr_el2(vcpu) != vt->gva) in kvm_vncr_tlb_lookup()
1283 if (vt->wr.nG) { in kvm_vncr_tlb_lookup()
1291 if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) || in kvm_vncr_tlb_lookup()
1295 return asid == vt->wr.asid; in kvm_vncr_tlb_lookup()
1303 struct vncr_tlb *vt = vcpu->arch.vncr_tlb; in kvm_handle_vncr_abort()
1317 scoped_guard(read_lock, &vcpu->kvm->mmu_lock) in kvm_handle_vncr_abort()
1323 ret = -EPERM; in kvm_handle_vncr_abort()
1326 case -EAGAIN: in kvm_handle_vncr_abort()
1329 case -ENOMEM: in kvm_handle_vncr_abort()
1338 case -EFAULT: in kvm_handle_vncr_abort()
1339 case -EIO: in kvm_handle_vncr_abort()
1340 case -EHWPOISON: in kvm_handle_vncr_abort()
1344 case -EINVAL: in kvm_handle_vncr_abort()
1345 case -ENOENT: in kvm_handle_vncr_abort()
1346 case -EACCES: in kvm_handle_vncr_abort()
1351 BUG_ON(!vt->wr.failed); in kvm_handle_vncr_abort()
1354 esr |= FIELD_PREP(ESR_ELx_FSC, vt->wr.fst); in kvm_handle_vncr_abort()
1358 case -EPERM: in kvm_handle_vncr_abort()
1374 struct vncr_tlb *vt = vcpu->arch.vncr_tlb; in kvm_map_l1_vncr()
1378 guard(read_lock)(&vcpu->kvm->mmu_lock); in kvm_map_l1_vncr()
1388 * Check that the pseudo-TLB is valid and that VNCR_EL2 still in kvm_map_l1_vncr()
1390 * without a mapping -- a transformed MSR/MRS will generate the in kvm_map_l1_vncr()
1391 * fault and allows us to populate the pseudo-TLB. in kvm_map_l1_vncr()
1393 if (!vt->valid) in kvm_map_l1_vncr()
1396 if (read_vncr_el2(vcpu) != vt->gva) in kvm_map_l1_vncr()
1399 if (vt->wr.nG) { in kvm_map_l1_vncr()
1407 if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) || in kvm_map_l1_vncr()
1411 if (asid != vt->wr.asid) in kvm_map_l1_vncr()
1415 vt->cpu = smp_processor_id(); in kvm_map_l1_vncr()
1417 if (vt->wr.pw && vt->wr.pr) in kvm_map_l1_vncr()
1419 else if (vt->wr.pr) in kvm_map_l1_vncr()
1425 * We can't map write-only (or no permission at all) in the kernel, in kvm_map_l1_vncr()
1431 __set_fixmap(vncr_fixmap(vt->cpu), vt->hpa, prot); in kvm_map_l1_vncr()
1433 atomic_inc(&vcpu->kvm->arch.vncr_map_count); in kvm_map_l1_vncr()
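Editor's note: the tail of kvm_map_l1_vncr() above chooses the fixmap protection from the guest's stage-1 permissions: read+write maps writable, read-only maps read-only, and anything else is not mapped at all because the kernel cannot express a write-only mapping (the access then faults and a permission fault is forwarded to the guest's EL2). A standalone restatement; the enum is an invented stand-in for the kernel's pgprot_t values:

#include <stdbool.h>

enum vncr_prot { VNCR_NO_MAP, VNCR_MAP_RO, VNCR_MAP_RW };

static enum vncr_prot vncr_prot_for(bool readable, bool writable)
{
	if (readable && writable)
		return VNCR_MAP_RW;
	if (readable)
		return VNCR_MAP_RO;
	/* Write-only (or no permission) cannot be expressed in a kernel
	 * mapping, so no fixmap entry is installed at all. */
	return VNCR_NO_MAP;
}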
1476 /* No RME, AMU, MPAM, or S-EL2 */ in limit_nv_id_reg()
1489 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP); in limit_nv_id_reg()
1562 if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) in limit_nv_id_reg()
1582 * - FEAT_VHE without FEAT_E2H0 in limit_nv_id_reg()
1583 * - FEAT_NV limited to FEAT_NV2 in limit_nv_id_reg()
1584 * - HCR_EL2.NV1 being RES0 in limit_nv_id_reg()
1588 * - FEAT_E2H0 without FEAT_VHE nor FEAT_NV in limit_nv_id_reg()
1592 if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) { in limit_nv_id_reg()
1629 masks = vcpu->kvm->arch.sysreg_masks; in kvm_vcpu_apply_reg_masks()
1632 sr -= __SANITISED_REG_START__; in kvm_vcpu_apply_reg_masks()
1634 v &= ~masks->mask[sr].res0; in kvm_vcpu_apply_reg_masks()
1635 v |= masks->mask[sr].res1; in kvm_vcpu_apply_reg_masks()
1643 int i = sr - __SANITISED_REG_START__; in set_sysreg_masks()
1649 kvm->arch.sysreg_masks->mask[i].res0 = res0; in set_sysreg_masks()
1650 kvm->arch.sysreg_masks->mask[i].res1 = res1; in set_sysreg_masks()
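Editor's note: kvm_vcpu_apply_reg_masks() and set_sysreg_masks() above boil down to one operation per register: bits listed in res0 are forced to 0, bits listed in res1 are forced to 1, and everything else passes through. A standalone restatement with an example; apply_masks is an invented name:

#include <stdint.h>

static uint64_t apply_masks(uint64_t v, uint64_t res0, uint64_t res1)
{
	v &= ~res0;	/* RES0 bits read as zero */
	v |= res1;	/* RES1 bits read as one */
	return v;
}

/* e.g. apply_masks(0xff, 0x0f, 0x100) == 0x1f0 */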
1655 struct kvm *kvm = vcpu->kvm; in kvm_init_nv_sysregs()
1658 lockdep_assert_held(&kvm->arch.config_lock); in kvm_init_nv_sysregs()
1660 if (kvm->arch.sysreg_masks) in kvm_init_nv_sysregs()
1663 kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)), in kvm_init_nv_sysregs()
1665 if (!kvm->arch.sysreg_masks) in kvm_init_nv_sysregs()
1666 return -ENOMEM; in kvm_init_nv_sysregs()
1710 /* HAFGRTR_EL2 - not a lot to see here */ in kvm_init_nv_sysregs()
1785 struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu; in check_nested_vcpu_requests()
1787 write_lock(&vcpu->kvm->mmu_lock); in check_nested_vcpu_requests()
1788 if (mmu->pending_unmap) { in check_nested_vcpu_requests()
1790 mmu->pending_unmap = false; in check_nested_vcpu_requests()
1792 write_unlock(&vcpu->kvm->mmu_lock); in check_nested_vcpu_requests()
1835 * Re-attempt SError injection in case the deliverability has changed, in kvm_nested_sync_hwstate()
1846 * against future changes to the non-nested trap configuration.
1863 vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE); in kvm_nested_setup_mdcr_el2()
1873 vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA; in kvm_nested_setup_mdcr_el2()