Lines Matching +full:spe +full:- +full:pmu

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012,2013 - ARM Ltd
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
20 #include <linux/irqchip/arm-gic-v3.h>
25 #include <asm/debug-monitors.h>
74 "sys_reg read to write-only register"); in read_from_write_only()
82 "sys_reg write to read-only register"); in write_to_read_only()
188 * If we have a non-VHE guest and the sysreg in vcpu_read_sys_reg()
190 * in-memory copy instead. in vcpu_read_sys_reg()
228 * to reverse-translate virtual EL2 system registers for a in vcpu_write_sys_reg()
229 * non-VHE guest hypervisor. in vcpu_write_sys_reg()
291 * = Log2(bytes) - 2 + 2 in get_min_cache_line_size()
302 if (vcpu->arch.ccsidr) in get_ccsidr()
303 return vcpu->arch.ccsidr[csselr]; in get_ccsidr()
318 * non-aliasing) are 1 set and 1 way. in get_ccsidr()
330 return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4); in get_ccsidr()
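The LineSize arithmetic in the get_ccsidr() fragments above can be checked in isolation: CTR_EL0.DminLine encodes Log2(words), get_min_cache_line_size() converts that to Log2(bytes), and CCSIDR_EL1.LineSize stores Log2(bytes) - 4. A minimal standalone sketch (illustrative user-space C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* CTR_EL0.DminLine encodes Log2(words); for a 64-byte line it is 4. */
        uint8_t dminline   = 4;
        uint8_t log2_bytes = dminline + 2;   /* = Log2(bytes) - 2 + 2 = 6       */
        /* CCSIDR_EL1.LineSize encodes Log2(bytes) - 4, i.e. Log2(words) - 2.   */
        uint8_t linesize   = log2_bytes - 4; /* 2 for a 64-byte line            */

        printf("line bytes=%u, CCSIDR LineSize=%u\n", 1u << log2_bytes, linesize);
        return 0;
    }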
336 u32 *ccsidr = vcpu->arch.ccsidr; in set_ccsidr()
341 return -EINVAL; in set_ccsidr()
349 return -ENOMEM; in set_ccsidr()
354 vcpu->arch.ccsidr = ccsidr; in set_ccsidr()
366 if (p->is_write) in access_rw()
367 vcpu_write_sys_reg(vcpu, p->regval, r->reg); in access_rw()
369 p->regval = vcpu_read_sys_reg(vcpu, r->reg); in access_rw()
381 if (!p->is_write) in access_dcsw()
388 * CPU left in the system, and certainly not from non-secure in access_dcsw()
401 if (!kvm_has_mte(vcpu->kvm)) in access_dcgsw()
410 switch (r->aarch32_map) { in get_access_mask()
438 BUG_ON(!p->is_write); in access_vm_reg()
443 val = vcpu_read_sys_reg(vcpu, r->reg); in access_vm_reg()
449 val |= (p->regval & (mask >> shift)) << shift; in access_vm_reg()
450 vcpu_write_sys_reg(vcpu, val, r->reg); in access_vm_reg()
462 if (p->is_write) in access_actlr()
466 p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift; in access_actlr()
483 if (!kvm_has_gicv3(vcpu->kvm)) in access_gic_sgi()
486 if (!p->is_write) in access_gic_sgi()
496 if (p->Op0 == 0) { /* AArch32 */ in access_gic_sgi()
497 switch (p->Op1) { in access_gic_sgi()
508 switch (p->Op2) { in access_gic_sgi()
520 vgic_v3_dispatch_sgi(vcpu, p->regval, g1); in access_gic_sgi()
529 if (!kvm_has_gicv3(vcpu->kvm)) in access_gic_sre()
532 if (p->is_write) in access_gic_sre()
535 if (p->Op1 == 4) { /* ICC_SRE_EL2 */ in access_gic_sre()
536 p->regval = (ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE | in access_gic_sre()
539 p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; in access_gic_sre()
549 if (p->is_write) in trap_raz_wi()
567 if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) in trap_loregion()
570 if (p->is_write && sr == SYS_LORID_EL1) in trap_loregion()
580 if (!p->is_write) in trap_oslar_el1()
583 kvm_debug_handle_oslar(vcpu, p->regval); in trap_oslar_el1()
591 if (p->is_write) in trap_oslsr_el1()
594 p->regval = __vcpu_sys_reg(vcpu, r->reg); in trap_oslsr_el1()
605 if ((val ^ rd->val) & ~OSLSR_EL1_OSLK) in set_oslsr_el1()
606 return -EINVAL; in set_oslsr_el1()
608 __vcpu_assign_sys_reg(vcpu, rd->reg, val); in set_oslsr_el1()
616 if (p->is_write) { in trap_dbgauthstatus_el1()
619 p->regval = read_sysreg(dbgauthstatus_el1); in trap_dbgauthstatus_el1()
651 val |= (p->regval & (mask >> shift)) << shift; in reg_to_dbg()
663 p->regval = (*dbg_reg & mask) >> shift; in dbg_to_reg()
668 struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state; in demux_wb_reg()
670 switch (rd->Op2) { in demux_wb_reg()
672 return &dbg->dbg_bvr[rd->CRm]; in demux_wb_reg()
674 return &dbg->dbg_bcr[rd->CRm]; in demux_wb_reg()
676 return &dbg->dbg_wvr[rd->CRm]; in demux_wb_reg()
678 return &dbg->dbg_wcr[rd->CRm]; in demux_wb_reg()
680 KVM_BUG_ON(1, vcpu->kvm); in demux_wb_reg()
693 if (p->is_write) in trap_dbg_wb_reg()
708 return -EINVAL; in set_dbg_wb_reg()
720 return -EINVAL; in get_dbg_wb_reg()
738 *reg = rd->val; in reset_dbg_wb_reg()
739 return rd->val; in reset_dbg_wb_reg()
767 mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); in reset_mpidr()
768 mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); in reset_mpidr()
769 mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); in reset_mpidr()
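The reset_mpidr() fragments above slice the vCPU id into Aff0[3:0], Aff1[7:0] and Aff2[7:0]. A standalone illustration of the same bit packing, taking MPIDR_LEVEL_SHIFT(0/1/2) as 0/8/16 and using a made-up vcpu_id:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t vcpu_id = 0x1234;                          /* example id       */
        uint64_t mpidr;

        mpidr  = (uint64_t)(vcpu_id & 0x0f) << 0;           /* Aff0 = 0x04      */
        mpidr |= (uint64_t)((vcpu_id >> 4) & 0xff) << 8;    /* Aff1 = 0x23      */
        mpidr |= (uint64_t)((vcpu_id >> 12) & 0xff) << 16;  /* Aff2 = 0x01      */

        printf("MPIDR affinity = 0x%llx\n", (unsigned long long)mpidr);
        return 0;
    }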
788 u8 n = vcpu->kvm->arch.nr_pmu_counters; in reset_pmu_reg()
791 mask |= GENMASK(n - 1, 0); in reset_pmu_reg()
794 __vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask); in reset_pmu_reg()
796 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmu_reg()
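As a worked example of the mask built in reset_pmu_reg() above, assume nr_pmu_counters = 6 and the cycle counter at bit 31 (the usual ARMV8_PMU_CYCLE_IDX); GENMASK(5, 0) keeps the six event-counter bits and everything else is cleared from the register:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK64(h, l) (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

    int main(void)
    {
        uint64_t mask = 1ULL << 31;        /* cycle counter bit (assumed index)  */
        uint8_t  n    = 6;                 /* example nr_pmu_counters            */

        if (n)
            mask |= GENMASK64(n - 1, 0);   /* bits [5:0] for the event counters  */

        printf("mask = 0x%llx\n", (unsigned long long)mask);   /* 0x8000003f     */
        return 0;
    }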
802 __vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0)); in reset_pmevcntr()
804 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmevcntr()
814 __vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm)); in reset_pmevtyper()
816 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmevtyper()
822 __vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK); in reset_pmselr()
824 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmselr()
838 __vcpu_assign_sys_reg(vcpu, r->reg, pmcr); in reset_pmcr()
840 return __vcpu_sys_reg(vcpu, r->reg); in reset_pmcr()
882 if (p->is_write) { in access_pmcr()
889 val |= p->regval & ARMV8_PMU_PMCR_MASK; in access_pmcr()
897 p->regval = val; in access_pmcr()
909 if (p->is_write) in access_pmselr()
910 __vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval); in access_pmselr()
913 p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0) in access_pmselr()
924 BUG_ON(p->is_write); in access_pmceid()
931 pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1)); in access_pmceid()
935 p->regval = pmceid; in access_pmceid()
959 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0) in get_pmu_evcntr()
964 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in get_pmu_evcntr()
975 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0) in set_pmu_evcntr()
980 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in set_pmu_evcntr()
992 if (r->CRn == 9 && r->CRm == 13) { in access_pmu_evcntr()
993 if (r->Op2 == 2) { in access_pmu_evcntr()
1000 } else if (r->Op2 == 0) { in access_pmu_evcntr()
1007 } else if (r->CRn == 0 && r->CRm == 9) { in access_pmu_evcntr()
1013 } else if (r->CRn == 14 && (r->CRm & 12) == 8) { in access_pmu_evcntr()
1018 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in access_pmu_evcntr()
1027 if (p->is_write) { in access_pmu_evcntr()
1031 kvm_pmu_set_counter_value(vcpu, idx, p->regval); in access_pmu_evcntr()
1033 p->regval = kvm_pmu_get_counter_value(vcpu, idx); in access_pmu_evcntr()
1047 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { in access_pmu_evtyper()
1051 } else if (r->CRn == 14 && (r->CRm & 12) == 12) { in access_pmu_evtyper()
1052 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); in access_pmu_evtyper()
1065 if (p->is_write) { in access_pmu_evtyper()
1066 kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); in access_pmu_evtyper()
1069 p->regval = __vcpu_sys_reg(vcpu, reg); in access_pmu_evtyper()
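The recurring idx = ((CRm & 3) << 3) | (Op2 & 7) decode in the two handlers above recovers the counter number n from the PMEVCNTR<n>_EL0 / PMEVTYPER<n>_EL0 encodings, where n[4:3] sits in CRm[1:0] and n[2:0] in Op2. A standalone check with one made-up access:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* PMEVCNTR10_EL0: CRn = 14, CRm = 0b1001, Op2 = 0b010 (n = 10). */
        uint8_t crm = 0x9, op2 = 0x2;
        uint8_t idx = ((crm & 3) << 3) | (op2 & 7);

        printf("idx = %u\n", idx);   /* prints 10 */
        return 0;
    }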
1079 __vcpu_assign_sys_reg(vcpu, r->reg, val & mask); in set_pmreg()
1089 *val = __vcpu_sys_reg(vcpu, r->reg) & mask; in get_pmreg()
1102 if (p->is_write) { in access_pmcnten()
1103 val = p->regval & mask; in access_pmcnten()
1104 if (r->Op2 & 0x1) in access_pmcnten()
1113 p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); in access_pmcnten()
1127 if (p->is_write) { in access_pminten()
1128 u64 val = p->regval & mask; in access_pminten()
1130 if (r->Op2 & 0x1) in access_pminten()
1137 p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1); in access_pminten()
1151 if (p->is_write) { in access_pmovs()
1152 if (r->CRm & 0x2) in access_pmovs()
1154 __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask)); in access_pmovs()
1157 __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask)); in access_pmovs()
1159 p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); in access_pmovs()
1170 if (!p->is_write) in access_pmswinc()
1177 kvm_pmu_software_increment(vcpu, p->regval & mask); in access_pmswinc()
1184 if (p->is_write) { in access_pmuserenr()
1189 (p->regval & ARMV8_PMU_USERENR_MASK)); in access_pmuserenr()
1191 p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0) in access_pmuserenr()
1209 struct kvm *kvm = vcpu->kvm; in set_pmcr()
1211 mutex_lock(&kvm->arch.config_lock); in set_pmcr()
1214 * The vCPU can't have more counters than the PMU hardware in set_pmcr()
1221 kvm->arch.nr_pmu_counters = new_n; in set_pmcr()
1223 mutex_unlock(&kvm->arch.config_lock); in set_pmcr()
1240 __vcpu_assign_sys_reg(vcpu, r->reg, val); in set_pmcr()
1459 if (p->is_write) in access_arch_timer()
1460 kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval); in access_arch_timer()
1462 p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg); in access_arch_timer()
1504 * arm64_check_features() - Check if a feature register value constitutes
1522 u64 writable_mask = rd->val; in arm64_check_features()
1523 u64 limit = rd->reset(vcpu, rd); in arm64_check_features()
1532 return val ? -E2BIG : 0; in arm64_check_features()
1536 return -EINVAL; in arm64_check_features()
1538 ftrp = ftr_reg->ftr_bits; in arm64_check_features()
1540 for (; ftrp && ftrp->width; ftrp++) { in arm64_check_features()
1558 return -E2BIG; in arm64_check_features()
1563 return -E2BIG; in arm64_check_features()
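A compressed sketch of the policy the arm64_check_features() fragments implement: every writable ID field in the user value must be no more permissive than the same field in the reset limit, and bits outside the writable mask must match the limit exactly, otherwise -E2BIG. The helper below is hypothetical; the real loop walks ftr_reg->ftr_bits and uses arm64_ftr_value()/arm64_ftr_safe_value().

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the per-field safe-value comparison. */
    static bool field_ok(uint64_t user_field, uint64_t limit_field)
    {
        return user_field <= limit_field;   /* simplification: lower is "safer" */
    }

    /* 0 if 'val' is no more permissive than 'limit' within 'writable'; else
     * non-zero (the kernel returns -E2BIG, later mapped to -EINVAL). */
    static int check_features(uint64_t val, uint64_t limit, uint64_t writable)
    {
        if ((val & ~writable) != (limit & ~writable))
            return -1;                      /* non-writable bits must match     */

        for (int shift = 0; shift < 64; shift += 4)      /* 4-bit ID fields     */
            if (!field_ok((val >> shift) & 0xf, (limit >> shift) & 0xf))
                return -1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_features(0x11, 0x21, ~0ULL));   /* 0: subset       */
        printf("%d\n", check_features(0x31, 0x21, ~0ULL));   /* -1: too big     */
        return 0;
    }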
1604 if (!kvm_has_mte(vcpu->kvm)) { in __kvm_read_sanitised_id_reg()
1655 val = limit_nv_id_reg(vcpu->kvm, id, val); in __kvm_read_sanitised_id_reg()
1668 return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r)); in read_id_reg()
1682 * registers KVM maintains on a per-VM basis.
1685 * per-VM registers.
1756 if (p->is_write) in access_id_reg()
1759 p->regval = read_id_reg(vcpu, r); in access_id_reg()
1764 /* Visibility overrides for SVE-specific control registers */
1777 if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP)) in sme_visibility()
1786 if (kvm_has_fpmr(vcpu->kvm)) in fp8_visibility()
1799 * Although this is a per-CPU feature, we make it global because in sanitise_id_aa64pfr0_el1()
1836 * Only initialize the PMU version if the vCPU was configured with one. in sanitise_id_aa64dfr0_el1()
1843 /* Hide SPE from guests */ in sanitise_id_aa64dfr0_el1()
1860 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the in set_id_aa64dfr0_el1()
1862 * exposed an IMP_DEF PMU to userspace and the guest on systems w/ in set_id_aa64dfr0_el1()
1863 * non-architectural PMUs. Of course, PMUv3 is the only game in town for in set_id_aa64dfr0_el1()
1864 * PMU virtualization, so the IMP_DEF value was rather user-hostile. in set_id_aa64dfr0_el1()
1870 * surprising than an ill-guided PMU driver poking at impdef system in set_id_aa64dfr0_el1()
1881 return -EINVAL; in set_id_aa64dfr0_el1()
1922 return -EINVAL; in set_id_dfr0_el1()
1925 return -EINVAL; in set_id_dfr0_el1()
1943 * But KVM must also accept values from user-space that were provided in set_id_aa64pfr0_el1()
1944 * by KVM. On CPUs that support MPAM, permit user-space to write in set_id_aa64pfr0_el1()
1954 return -EINVAL; in set_id_aa64pfr0_el1()
1978 * As KVM must accept values from user-space that were originally provided by KVM, in set_id_aa64pfr1_el1()
1979 * when ID_AA64PFR1_EL1.MTE is 2 allow user-space to set in set_id_aa64pfr1_el1()
2005 return -EINVAL; in set_id_aa64mmfr0_el1()
2045 return -EINVAL; in set_ctr_el0()
2050 return -ENOENT; in set_ctr_el0()
2068 if (kvm_vm_has_ran_once(vcpu->kvm)) { in get_id_reg()
2073 mutex_lock(&vcpu->kvm->arch.config_lock); in get_id_reg()
2075 mutex_unlock(&vcpu->kvm->arch.config_lock); in get_id_reg()
2086 mutex_lock(&vcpu->kvm->arch.config_lock); in set_id_reg()
2092 if (kvm_vm_has_ran_once(vcpu->kvm)) { in set_id_reg()
2094 ret = -EBUSY; in set_id_reg()
2098 mutex_unlock(&vcpu->kvm->arch.config_lock); in set_id_reg()
2104 kvm_set_vm_id_reg(vcpu->kvm, id, val); in set_id_reg()
2106 mutex_unlock(&vcpu->kvm->arch.config_lock); in set_id_reg()
2109 * arm64_check_features() returns -E2BIG to indicate the register's in set_id_reg()
2110 * feature set is a superset of the maximally-allowed register value. in set_id_reg()
2113 * writes return -EINVAL. in set_id_reg()
2115 if (ret == -E2BIG) in set_id_reg()
2116 ret = -EINVAL; in set_id_reg()
2122 u64 *p = __vm_id_reg(&kvm->arch, reg); in kvm_set_vm_id_reg()
2124 lockdep_assert_held(&kvm->arch.config_lock); in kvm_set_vm_id_reg()
2148 if (p->is_write) in access_ctr()
2151 p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0); in access_ctr()
2158 if (p->is_write) in access_clidr()
2161 p->regval = __vcpu_sys_reg(vcpu, r->reg); in access_clidr()
2213 if (kvm_has_mte(vcpu->kvm)) in reset_clidr()
2216 __vcpu_assign_sys_reg(vcpu, r->reg, clidr); in reset_clidr()
2218 return __vcpu_sys_reg(vcpu, r->reg); in reset_clidr()
2228 return -EINVAL; in set_clidr()
2230 __vcpu_assign_sys_reg(vcpu, rd->reg, val); in set_clidr()
2238 int reg = r->reg; in access_csselr()
2240 if (p->is_write) in access_csselr()
2241 vcpu_write_sys_reg(vcpu, p->regval, reg); in access_csselr()
2243 p->regval = vcpu_read_sys_reg(vcpu, reg); in access_csselr()
2252 if (p->is_write) in access_ccsidr()
2258 p->regval = get_ccsidr(vcpu, csselr); in access_ccsidr()
2266 if (kvm_has_mte(vcpu->kvm)) in mte_visibility()
2299 "trap of VNCR-backed register"); in bad_vncr_trap()
2406 if (p->is_write) in access_sp_el1()
2407 __vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval); in access_sp_el1()
2409 p->regval = __vcpu_sys_reg(vcpu, SP_EL1); in access_sp_el1()
2418 if (p->is_write) in access_elr()
2419 vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1); in access_elr()
2421 p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1); in access_elr()
2430 if (p->is_write) in access_spsr()
2431 __vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval); in access_spsr()
2433 p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1); in access_spsr()
2442 if (p->is_write) in access_cntkctl_el12()
2443 __vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval); in access_cntkctl_el12()
2445 p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1); in access_cntkctl_el12()
2452 u64 val = r->val; in reset_hcr()
2457 __vcpu_assign_sys_reg(vcpu, r->reg, val); in reset_hcr()
2459 return __vcpu_sys_reg(vcpu, r->reg); in reset_hcr()
2480 kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY)) in vncr_el2_visibility()
2497 if (!p->is_write) { in access_zcr_el2()
2498 p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2); in access_zcr_el2()
2502 vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1; in access_zcr_el2()
2504 vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2); in access_zcr_el2()
2513 if (p->is_write) in access_gic_vtr()
2516 p->regval = kvm_vgic_global_state.ich_vtr_el2; in access_gic_vtr()
2517 p->regval &= ~(ICH_VTR_EL2_DVIM | in access_gic_vtr()
2520 p->regval |= ICH_VTR_EL2_nV4; in access_gic_vtr()
2529 if (p->is_write) in access_gic_misr()
2532 p->regval = vgic_v3_get_misr(vcpu); in access_gic_misr()
2541 if (p->is_write) in access_gic_eisr()
2544 p->regval = vgic_v3_get_eisr(vcpu); in access_gic_eisr()
2553 if (p->is_write) in access_gic_elrsr()
2556 p->regval = vgic_v3_get_elrsr(vcpu); in access_gic_elrsr()
2564 if (kvm_has_s1poe(vcpu->kvm)) in s1poe_visibility()
2579 if (kvm_has_tcr2(vcpu->kvm)) in tcr2_visibility()
2594 if (kvm_has_s1pie(vcpu->kvm)) in s1pie_visibility()
2612 if (!p->is_write) { in access_mdcr()
2613 p->regval = old; in access_mdcr()
2617 val = p->regval; in access_mdcr()
2625 if (hpmn > vcpu->kvm->arch.nr_pmu_counters) { in access_mdcr()
2626 hpmn = vcpu->kvm->arch.nr_pmu_counters; in access_mdcr()
2633 * Request a reload of the PMU to enable/disable the counters in access_mdcr()
2649 * trapped, allowing the guest to read the actual hardware value. On big-little
2661 if (p->is_write) in access_imp_id_reg()
2665 * Return the VM-scoped implementation ID register values if userspace in access_imp_id_reg()
2668 if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags)) in access_imp_id_reg()
2677 p->regval = read_sysreg(revidr_el1); in access_imp_id_reg()
2680 p->regval = read_sysreg(aidr_el1); in access_imp_id_reg()
2710 KVM_BUG_ON(1, vcpu->kvm); in reset_imp_id_reg()
2718 struct kvm *kvm = vcpu->kvm; in set_imp_id_reg()
2721 guard(mutex)(&kvm->arch.config_lock); in set_imp_id_reg()
2727 if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags)) in set_imp_id_reg()
2728 return -EINVAL; in set_imp_id_reg()
2735 return -EBUSY; in set_imp_id_reg()
2741 if ((val & r->val) != val) in set_imp_id_reg()
2742 return -EINVAL; in set_imp_id_reg()
2759 __vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters); in reset_mdcr()
2760 return vcpu->kvm->arch.nr_pmu_counters; in reset_mdcr()
2890 * Prior to FEAT_Debugv8.9, the architecture defines context-aware
3440 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_at_s1e01()
3442 __kvm_at_s1e01(vcpu, op, p->regval); in handle_at_s1e01()
3450 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_at_s1e2()
3452 /* There is no FGT associated with AT S1E2A :-( */ in handle_at_s1e2()
3454 !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) { in handle_at_s1e2()
3459 __kvm_at_s1e2(vcpu, op, p->regval); in handle_at_s1e2()
3467 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_at_s12()
3469 __kvm_at_s12(vcpu, op, p->regval); in handle_at_s12()
3476 struct kvm *kvm = vpcu->kvm; in kvm_supported_tlbi_s12_op()
3493 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_alle1is()
3498 write_lock(&vcpu->kvm->mmu_lock); in handle_alle1is()
3504 kvm_nested_s2_unmap(vcpu->kvm, true); in handle_alle1is()
3506 write_unlock(&vcpu->kvm->mmu_lock); in handle_alle1is()
3513 struct kvm *kvm = vpcu->kvm; in kvm_supported_tlbi_ipas2_op()
3572 * at worst may cause more aborts for shadow stage-2 fills. in s2_mmu_unmap_range()
3574 * Dropping the MMU lock also implies that shadow stage-2 fills could in s2_mmu_unmap_range()
3576 * the L1 needs to put its stage-2 in a consistent state before doing in s2_mmu_unmap_range()
3579 kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true); in s2_mmu_unmap_range()
3585 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_vmalls12e1is()
3592 limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm)); in handle_vmalls12e1is()
3594 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), in handle_vmalls12e1is()
3609 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_ripas2e1is()
3621 base = decode_range_tlbi(p->regval, &range, NULL); in handle_ripas2e1is()
3623 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), in handle_ripas2e1is()
3644 * - NS bit: we're non-secure only. in s2_mmu_unmap_ipa()
3646 * - IPA[51:48]: We don't support 52bit IPA just yet... in s2_mmu_unmap_ipa()
3650 base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12; in s2_mmu_unmap_ipa()
3651 max_size = compute_tlb_inval_range(mmu, info->ipa.addr); in s2_mmu_unmap_ipa()
3652 base_addr &= ~(max_size - 1); in s2_mmu_unmap_ipa()
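The address reconstruction in s2_mmu_unmap_ipa() above follows the TLBI IPAS2E1IS operand format: Xt[35:0] carries IPA[47:12] (the comment notes that IPA[51:48] is not supported), so shifting left by 12 rebuilds the IPA, which is then aligned down to the invalidation granule from compute_tlb_inval_range(). A standalone example with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t xt       = 0x12345;                 /* IPA[47:12] field        */
        uint64_t max_size = 2ULL << 20;              /* assume a 2MiB granule   */

        uint64_t base = (xt & ((1ULL << 36) - 1)) << 12;   /* 0x12345000        */
        base &= ~(max_size - 1);                           /* 0x12200000        */

        printf("base = 0x%llx\n", (unsigned long long)base);
        return 0;
    }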
3664 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_ipas2e1is()
3670 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), in handle_ipas2e1is()
3673 .addr = p->regval, in handle_ipas2e1is()
3684 WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding)); in s2_mmu_tlbi_s1e1()
3690 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_tlbi_el2()
3695 kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval); in handle_tlbi_el2()
3702 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); in handle_tlbi_el1()
3710 * - HCR_EL2.E2H == 0 : a non-VHE guest in handle_tlbi_el1()
3711 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode in handle_tlbi_el1()
3716 * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In in handle_tlbi_el1()
3718 * as we don't allow an NV-capable L1 in an nVHE configuration. in handle_tlbi_el1()
3730 kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval); in handle_tlbi_el1()
3734 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, in handle_tlbi_el1()
3738 .addr = p->regval, in handle_tlbi_el1()
3936 if (p->is_write) { in trap_dbgdidr()
3939 u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1); in trap_dbgdidr()
3940 u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP); in trap_dbgdidr()
3942 p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) | in trap_dbgdidr()
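As a worked example of the synthesis in trap_dbgdidr() above: the ID_AA64DFR0_EL1.WRPs field (count minus one) is shifted into DBGDIDR[31:28], so WRPs = 3 (four watchpoints) contributes 0x3 << 28 = 0x30000000; the remaining fields visible in the source (BRPs, context-aware comparators, debug version, and the EL3-dependent bits) are packed into the lower positions of p->regval the same way.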
4128 /* PMU */
4289 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) { in check_sysreg_table()
4290 kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n", in check_sysreg_table()
4291 &table[i], i, table[i - 1].name, table[i].name); in check_sysreg_table()
4322 BUG_ON(!r->access); in perform_access()
4325 if (likely(r->access(vcpu, params, r))) in perform_access()
4330 * emulate_cp -- tries to match a sys_reg access in a handling table, and
4364 int cp = -1; in unhandled_cp_access()
4386 * kvm_handle_cp_64 -- handles an mrrc/mcrr trap on a guest CP14/CP15 access
4409 * Make a 64-bit value out of Rt and Rt2. As we use the same trap in kvm_handle_cp_64()
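The comment above refers to packing the two 32-bit GPRs of an MCRR/MRRC access into one 64-bit value before (or after) emulation, with Rt supplying the low word and Rt2 the high word per the AArch32 transfer convention. A minimal illustration (names are illustrative, not the kernel's):

    #include <stdint.h>

    uint64_t pack_rt_rt2(uint32_t rt, uint32_t rt2)
    {
        return (uint64_t)rt | ((uint64_t)rt2 << 32);   /* Rt = low, Rt2 = high */
    }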
4448 params->is_write = ((esr & 1) == 0); in kvm_esr_cp10_id_to_sys64()
4449 params->Op0 = 3; in kvm_esr_cp10_id_to_sys64()
4450 params->Op1 = 0; in kvm_esr_cp10_id_to_sys64()
4451 params->CRn = 0; in kvm_esr_cp10_id_to_sys64()
4452 params->CRm = 3; in kvm_esr_cp10_id_to_sys64()
4454 /* CP10 ID registers are read-only */ in kvm_esr_cp10_id_to_sys64()
4455 valid = !params->is_write; in kvm_esr_cp10_id_to_sys64()
4460 params->Op2 = 0; in kvm_esr_cp10_id_to_sys64()
4464 params->Op2 = 1; in kvm_esr_cp10_id_to_sys64()
4468 params->Op2 = 2; in kvm_esr_cp10_id_to_sys64()
4478 params->is_write ? "write" : "read", reg_id); in kvm_esr_cp10_id_to_sys64()
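Taken together, the kvm_esr_cp10_id_to_sys64() fragments above map a 32-bit VMRS of MVFR{0,1,2} onto the corresponding AArch64 encoding, and only reads are valid:

    VMRS MVFR0  ->  Op0=3, Op1=0, CRn=0, CRm=3, Op2=0  (MVFR0_EL1)
    VMRS MVFR1  ->  Op0=3, Op1=0, CRn=0, CRm=3, Op2=1  (MVFR1_EL1)
    VMRS MVFR2  ->  Op0=3, Op1=0, CRn=0, CRm=3, Op2=2  (MVFR2_EL1)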
4483 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
4487 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
4510 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
4521 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
4532 if (params->is_write) { in kvm_emulate_cp15_id_reg()
4537 params->Op0 = 3; in kvm_emulate_cp15_id_reg()
4544 if (params->CRm > 3) in kvm_emulate_cp15_id_reg()
4545 params->regval = 0; in kvm_emulate_cp15_id_reg()
4549 vcpu_set_reg(vcpu, Rt, params->regval); in kvm_emulate_cp15_id_reg()
4554 * kvm_handle_cp_32 -- handles an mrc/mcr trap on a guest CP14/CP15 access
4567 params->regval = vcpu_get_reg(vcpu, Rt); in kvm_handle_cp_32()
4570 if (!params->is_write) in kvm_handle_cp_32()
4571 vcpu_set_reg(vcpu, Rt, params->regval); in kvm_handle_cp_32()
4621 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
4667 struct kvm *kvm = s->private; in idregs_debug_start()
4670 mutex_lock(&kvm->arch.config_lock); in idregs_debug_start()
4672 iter = &kvm->arch.idreg_debugfs_iter; in idregs_debug_start()
4673 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) && in idregs_debug_start()
4679 iter = ERR_PTR(-EBUSY); in idregs_debug_start()
4682 mutex_unlock(&kvm->arch.config_lock); in idregs_debug_start()
4689 struct kvm *kvm = s->private; in idregs_debug_next()
4693 if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) { in idregs_debug_next()
4694 kvm->arch.idreg_debugfs_iter++; in idregs_debug_next()
4696 return &kvm->arch.idreg_debugfs_iter; in idregs_debug_next()
4704 struct kvm *kvm = s->private; in idregs_debug_stop()
4709 mutex_lock(&kvm->arch.config_lock); in idregs_debug_stop()
4711 kvm->arch.idreg_debugfs_iter = ~0; in idregs_debug_stop()
4713 mutex_unlock(&kvm->arch.config_lock); in idregs_debug_stop()
4719 struct kvm *kvm = s->private; in idregs_debug_show()
4721 desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter); in idregs_debug_show()
4723 if (!desc->name) in idregs_debug_show()
4727 desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc))); in idregs_debug_show()
4743 kvm->arch.idreg_debugfs_iter = ~0; in kvm_sys_regs_create_debugfs()
4745 debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm, in kvm_sys_regs_create_debugfs()
4752 struct kvm *kvm = vcpu->kvm; in reset_vm_ftr_id_reg()
4754 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)) in reset_vm_ftr_id_reg()
4757 kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg)); in reset_vm_ftr_id_reg()
4766 reg->reset(vcpu, reg); in reset_vcpu_ftr_id_reg()
4770 * kvm_reset_sys_regs - sets system registers to reset value
4778 struct kvm *kvm = vcpu->kvm; in kvm_reset_sys_regs()
4784 if (!r->reset) in kvm_reset_sys_regs()
4792 r->reset(vcpu, r); in kvm_reset_sys_regs()
4794 if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS) in kvm_reset_sys_regs()
4795 __vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0); in kvm_reset_sys_regs()
4798 set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); in kvm_reset_sys_regs()
4805 * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
4858 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) in index_to_params()
4860 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) in index_to_params()
4862 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) in index_to_params()
4864 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) in index_to_params()
4866 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) in index_to_params()
4901 if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r))) in id_to_sys_reg_desc()
4914 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) in demux_c15_get()
4915 return -ENOENT; in demux_c15_get()
4920 return -ENOENT; in demux_c15_get()
4924 return -ENOENT; in demux_c15_get()
4928 return -ENOENT; in demux_c15_get()
4939 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) in demux_c15_set()
4940 return -ENOENT; in demux_c15_set()
4945 return -ENOENT; in demux_c15_set()
4949 return -ENOENT; in demux_c15_set()
4952 return -EFAULT; in demux_c15_set()
4956 return -ENOENT; in demux_c15_set()
4963 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; in kvm_sys_reg_get_user()
4968 r = id_to_sys_reg_desc(vcpu, reg->id, table, num); in kvm_sys_reg_get_user()
4970 return -ENOENT; in kvm_sys_reg_get_user()
4972 if (r->get_user) { in kvm_sys_reg_get_user()
4973 ret = (r->get_user)(vcpu, r, &val); in kvm_sys_reg_get_user()
4975 val = __vcpu_sys_reg(vcpu, r->reg); in kvm_sys_reg_get_user()
4987 void __user *uaddr = (void __user *)(unsigned long)reg->addr; in kvm_arm_sys_reg_get_reg()
4989 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) in kvm_arm_sys_reg_get_reg()
4990 return demux_c15_get(vcpu, reg->id, uaddr); in kvm_arm_sys_reg_get_reg()
4999 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; in kvm_sys_reg_set_user()
5005 return -EFAULT; in kvm_sys_reg_set_user()
5007 r = id_to_sys_reg_desc(vcpu, reg->id, table, num); in kvm_sys_reg_set_user()
5009 return -ENOENT; in kvm_sys_reg_set_user()
5014 if (r->set_user) { in kvm_sys_reg_set_user()
5015 ret = (r->set_user)(vcpu, r, val); in kvm_sys_reg_set_user()
5017 __vcpu_assign_sys_reg(vcpu, r->reg, val); in kvm_sys_reg_set_user()
5026 void __user *uaddr = (void __user *)(unsigned long)reg->addr; in kvm_arm_sys_reg_set_reg()
5028 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) in kvm_arm_sys_reg_set_reg()
5029 return demux_c15_set(vcpu, reg->id, uaddr); in kvm_arm_sys_reg_set_reg()
5048 return -EFAULT; in write_demux_regids()
5058 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | in sys_reg_to_index()
5059 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | in sys_reg_to_index()
5060 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | in sys_reg_to_index()
5061 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | in sys_reg_to_index()
5062 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); in sys_reg_to_index()
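sys_reg_to_index() here and index_to_params() earlier are inverses: a KVM_REG_ARM64_SYSREG index is just the five encoding fields packed at fixed shifts. A standalone round-trip sketch; the shift constants are assumed to match the KVM_REG_ARM64_SYSREG_* UAPI definitions and the example register is only illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative packing; the kernel uses the KVM_REG_ARM64_SYSREG_* shifts. */
    static uint64_t encode(uint8_t op0, uint8_t op1, uint8_t crn,
                           uint8_t crm, uint8_t op2)
    {
        return ((uint64_t)op0 << 14) | ((uint64_t)op1 << 11) |
               ((uint64_t)crn << 7)  | ((uint64_t)crm << 3)  | op2;
    }

    int main(void)
    {
        /* SCTLR_EL1 is Op0=3, Op1=0, CRn=1, CRm=0, Op2=0. */
        uint64_t idx = encode(3, 0, 1, 0, 0);
        printf("index bits = 0x%llx\n", (unsigned long long)idx);
        return 0;
    }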
5086 if (!(rd->reg || rd->get_user)) in walk_one_sys_reg()
5093 return -EFAULT; in walk_one_sys_reg()
5145 u64 __user *masks = (u64 __user *)range->addr; in kvm_vm_ioctl_get_reg_writable_masks()
5148 if (range->range || in kvm_vm_ioctl_get_reg_writable_masks()
5149 memcmp(range->reserved, zero_page, sizeof(range->reserved))) in kvm_vm_ioctl_get_reg_writable_masks()
5150 return -EINVAL; in kvm_vm_ioctl_get_reg_writable_masks()
5154 return -EFAULT; in kvm_vm_ioctl_get_reg_writable_masks()
5161 if (!is_feature_id_reg(encoding) || !reg->set_user) in kvm_vm_ioctl_get_reg_writable_masks()
5164 if (!reg->val || in kvm_vm_ioctl_get_reg_writable_masks()
5168 val = reg->val; in kvm_vm_ioctl_get_reg_writable_masks()
5171 return -EFAULT; in kvm_vm_ioctl_get_reg_writable_masks()
5179 struct kvm *kvm = vcpu->kvm; in vcpu_set_hcr()
5182 vcpu->arch.hcr_el2 |= HCR_E2H; in vcpu_set_hcr()
5185 vcpu->arch.hcr_el2 |= HCR_TEA; in vcpu_set_hcr()
5187 vcpu->arch.hcr_el2 |= HCR_TERR; in vcpu_set_hcr()
5191 vcpu->arch.hcr_el2 |= HCR_FWB; in vcpu_set_hcr()
5196 vcpu->arch.hcr_el2 |= HCR_TID4; in vcpu_set_hcr()
5198 vcpu->arch.hcr_el2 |= HCR_TID2; in vcpu_set_hcr()
5201 vcpu->arch.hcr_el2 &= ~HCR_RW; in vcpu_set_hcr()
5203 if (kvm_has_mte(vcpu->kvm)) in vcpu_set_hcr()
5204 vcpu->arch.hcr_el2 |= HCR_ATA; in vcpu_set_hcr()
5212 vcpu->arch.hcr_el2 |= HCR_TTLBOS; in vcpu_set_hcr()
5217 struct kvm *kvm = vcpu->kvm; in kvm_calculate_traps()
5219 mutex_lock(&kvm->arch.config_lock); in kvm_calculate_traps()
5224 if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags)) in kvm_calculate_traps()
5235 set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags); in kvm_calculate_traps()
5237 mutex_unlock(&kvm->arch.config_lock); in kvm_calculate_traps()
5250 struct kvm *kvm = vcpu->kvm; in kvm_finalize_sys_regs()
5252 guard(mutex)(&kvm->arch.config_lock); in kvm_finalize_sys_regs()
5256 kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) { in kvm_finalize_sys_regs()
5257 kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK; in kvm_finalize_sys_regs()
5258 kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK; in kvm_finalize_sys_regs()
5285 return -EINVAL; in kvm_sys_reg_table_init()