Selected lines from the arm64 KVM PMU emulation code (arch/arm64/kvm/pmu-emul.c), shown with their source line numbers and the enclosing function for each fragment.

1 // SPDX-License-Identifier: GPL-2.0-only
35 return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]); in kvm_pmc_to_vcpu()
40 return &vcpu->arch.pmu.pmc[cnt_idx]; in kvm_vcpu_idx_to_pmc()
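The two helpers above map between a counter and its owning vCPU: the counters live in an array embedded in the vCPU, so the vCPU can be recovered with container_of(). A minimal standalone sketch of the same pointer arithmetic, using made-up stand-in types (struct pmc/pmu/vcpu below are illustrative, not the kernel's kvm_vcpu/kvm_pmu definitions):

#include <stddef.h>
#include <stdio.h>

struct pmc { int idx; };
struct pmu { struct pmc pmc[32]; };
struct vcpu { int id; struct pmu pmu; };

static struct vcpu *pmc_to_vcpu(struct pmc *pmc)
{
	/*
	 * &vcpu->pmu.pmc[idx] lies idx entries past pmc[0]; step back to
	 * pmc[0], then back over the offset of pmu.pmc within the vcpu.
	 * The kernel expresses the same walk with container_of().
	 */
	char *base = (char *)(pmc - pmc->idx);	/* &vcpu->pmu.pmc[0] */

	return (struct vcpu *)(base - offsetof(struct vcpu, pmu.pmc));
}

int main(void)
{
	struct vcpu v = { .id = 7 };

	for (int i = 0; i < 32; i++)
		v.pmu.pmc[i].idx = i;

	printf("vcpu id recovered via pmc[5]: %d\n",
	       pmc_to_vcpu(&v.pmu.pmc[5])->id);
	return 0;
}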
75 if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP)) in kvm_pmu_evtyper_mask()
84 * kvm_pmc_is_64bit - determine if counter is 64bit
91 return (pmc->idx == ARMV8_PMU_CYCLE_IDX || in kvm_pmc_is_64bit()
92 kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5)); in kvm_pmc_is_64bit()
100 if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx)) in kvm_pmc_has_64bit_overflow()
103 return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) || in kvm_pmc_has_64bit_overflow()
104 (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC)); in kvm_pmc_has_64bit_overflow()
109 return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX && in kvm_pmu_counter_can_chain()
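The predicates above decide how wide a counter is, when it wraps at 64 bits, and whether an even-numbered counter can chain with its odd neighbour. A standalone sketch of the wrap-width and chaining checks, taking the cycle counter index as 31 and passing the relevant PMCR_EL0 controls in as booleans; the hypervisor-owned counter special case visible at the top of the fragment is left out:

#include <stdbool.h>
#include <stdio.h>

#define CYCLE_IDX 31	/* the fixed cycle counter sits at index 31 */

/*
 * 64-bit wrap: the cycle counter with PMCR_EL0.LC set, or (from PMUv3p5)
 * an event counter with PMCR_EL0.LP set. The PMCR bits are passed in as
 * booleans here rather than decoded from a register value.
 */
static bool has_64bit_overflow(unsigned int idx, bool pmcr_lp, bool pmcr_lc)
{
	if (idx == CYCLE_IDX)
		return pmcr_lc;
	return pmcr_lp;
}

/*
 * Chaining pairs an even counter with its odd neighbour, and only makes
 * sense while the even counter still wraps at 32 bits.
 */
static bool counter_can_chain(unsigned int idx, bool pmcr_lp, bool pmcr_lc)
{
	return !(idx & 1) && (idx + 1) < CYCLE_IDX &&
	       !has_64bit_overflow(idx, pmcr_lp, pmcr_lc);
}

int main(void)
{
	printf("counter 0, LP clear: chains? %d\n", counter_can_chain(0, false, false));
	printf("counter 0, LP set:   chains? %d\n", counter_can_chain(0, true, false));
	printf("counter 3, LP clear: chains? %d\n", counter_can_chain(3, false, false));
	return 0;
}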
125 return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx)); in kvm_pmc_read_evtreg()
133 reg = counter_index_to_reg(pmc->idx); in kvm_pmu_get_pmc_value()
140 if (pmc->perf_event) in kvm_pmu_get_pmc_value()
141 counter += perf_event_read_value(pmc->perf_event, &enabled, in kvm_pmu_get_pmc_value()
151 * kvm_pmu_get_counter_value - get PMU counter value
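The read path above folds whatever the backing perf event has counted since the last snapshot into the value saved in the counter's register, and the result is truncated to 32 bits for counters without a 64-bit view. A trimmed-down model of that arithmetic, with the perf plumbing replaced by a plain live_delta argument:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Architectural counter value = the snapshot stored in the counter's
 * register plus the delta accumulated by the backing event, truncated
 * to 32 bits when the counter is not 64-bit.
 */
static uint64_t read_counter(uint64_t saved, uint64_t live_delta, bool is_64bit)
{
	uint64_t val = saved + live_delta;

	return is_64bit ? val : (uint32_t)val;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)read_counter(0xfffffff0ULL, 0x20, false));
	printf("%#llx\n", (unsigned long long)read_counter(0xfffffff0ULL, 0x20, true));
	return 0;
}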
167 reg = counter_index_to_reg(pmc->idx); in kvm_pmu_set_pmc_value()
169 if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX && in kvm_pmu_set_pmc_value()
188 * kvm_pmu_set_counter_value - set PMU counter value
199 * kvm_pmu_set_counter_value_user - set PMU counter value from user
212 * kvm_pmu_release_perf_event - remove the perf event
217 if (pmc->perf_event) { in kvm_pmu_release_perf_event()
218 perf_event_disable(pmc->perf_event); in kvm_pmu_release_perf_event()
219 perf_event_release_kernel(pmc->perf_event); in kvm_pmu_release_perf_event()
220 pmc->perf_event = NULL; in kvm_pmu_release_perf_event()
225 * kvm_pmu_stop_counter - stop PMU counter
235 if (!pmc->perf_event) in kvm_pmu_stop_counter()
240 reg = counter_index_to_reg(pmc->idx); in kvm_pmu_stop_counter()
248 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
255 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_init()
258 pmu->pmc[i].idx = i; in kvm_pmu_vcpu_init()
262 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
272 irq_work_sync(&vcpu->arch.pmu.overflow_work); in kvm_pmu_vcpu_destroy()
283 n = vcpu->kvm->arch.nr_pmu_counters; in kvm_pmu_hyp_counter_mask()
298 * range reserved for EL2/EL3. in kvm_pmu_hyp_counter_mask()
300 return GENMASK(n - 1, hpmn); in kvm_pmu_hyp_counter_mask()
325 return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX); in kvm_pmu_implemented_counter_mask()
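The mask helpers above build bitmaps of counter indices with GENMASK() and BIT(). A standalone version using simplified local stand-ins for those macros, taking the cycle counter index as 31; the kernel's additional check that the vCPU actually has a virtual EL2 is dropped here:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's BIT()/GENMASK() helpers. */
#define BIT(n)		(1ULL << (n))
#define GENMASK(h, l)	(((~0ULL) >> (63 - (h) + (l))) << (l))

#define CYCLE_IDX	31

/*
 * Counters implemented for the guest: event counters 0..n-1 plus the
 * fixed cycle counter.
 */
static uint64_t implemented_counter_mask(unsigned int n)
{
	uint64_t mask = BIT(CYCLE_IDX);

	if (n)
		mask |= GENMASK(n - 1, 0);
	return mask;
}

/*
 * Counters reserved for the hypervisor when MDCR_EL2.HPMN splits the
 * implemented range: indices hpmn..n-1, or nothing if hpmn >= n.
 */
static uint64_t hyp_counter_mask(unsigned int n, unsigned int hpmn)
{
	if (hpmn >= n)
		return 0;
	return GENMASK(n - 1, hpmn);
}

int main(void)
{
	printf("implemented(n=6): %#llx\n",
	       (unsigned long long)implemented_counter_mask(6));
	printf("hyp(n=6, hpmn=4): %#llx\n",
	       (unsigned long long)hyp_counter_mask(6, 4));
	return 0;
}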
330 if (!pmc->perf_event) { in kvm_pmc_enable_perf_event()
335 perf_event_enable(pmc->perf_event); in kvm_pmc_enable_perf_event()
336 if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE) in kvm_pmc_enable_perf_event()
342 if (pmc->perf_event) in kvm_pmc_disable_perf_event()
343 perf_event_disable(pmc->perf_event); in kvm_pmc_disable_perf_event()
398 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_state()
402 if (pmu->irq_level == overflow) in kvm_pmu_update_state()
405 pmu->irq_level = overflow; in kvm_pmu_update_state()
407 if (likely(irqchip_in_kernel(vcpu->kvm))) { in kvm_pmu_update_state()
408 int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu, in kvm_pmu_update_state()
409 pmu->irq_num, overflow, pmu); in kvm_pmu_update_state()
416 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_should_notify_user()
417 struct kvm_sync_regs *sregs = &vcpu->run->s.regs; in kvm_pmu_should_notify_user()
418 bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU; in kvm_pmu_should_notify_user()
420 if (likely(irqchip_in_kernel(vcpu->kvm))) in kvm_pmu_should_notify_user()
423 return pmu->irq_level != run_level; in kvm_pmu_should_notify_user()
431 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_pmu_update_run()
434 regs->device_irq_level &= ~KVM_ARM_DEV_PMU; in kvm_pmu_update_run()
435 if (vcpu->arch.pmu.irq_level) in kvm_pmu_update_run()
436 regs->device_irq_level |= KVM_ARM_DEV_PMU; in kvm_pmu_update_run()
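The lines above track the virtual overflow interrupt as a level: nothing is injected unless the level actually changes, and for a userspace irqchip the level is mirrored into the run structure's device_irq_level word. A toy model of that bookkeeping; DEV_PMU_FLAG, struct toy_pmu and the printf standing in for injection are all inventions of this sketch, and the vgic path is not modelled:

#include <stdbool.h>
#include <stdio.h>

#define DEV_PMU_FLAG (1u << 0)	/* stand-in for KVM_ARM_DEV_PMU */

struct toy_pmu {
	bool irq_level;		/* last level we reported        */
	unsigned int dev_irq_level;	/* what userspace last saw */
};

static void update_state(struct toy_pmu *pmu, bool overflow)
{
	if (pmu->irq_level == overflow)
		return;			/* nothing changed, nothing to inject */

	pmu->irq_level = overflow;
	printf("level change -> %d (inject/retire the IRQ here)\n", overflow);
}

static bool should_notify_user(const struct toy_pmu *pmu)
{
	bool run_level = pmu->dev_irq_level & DEV_PMU_FLAG;

	return pmu->irq_level != run_level;
}

static void update_run(struct toy_pmu *pmu)
{
	pmu->dev_irq_level &= ~DEV_PMU_FLAG;
	if (pmu->irq_level)
		pmu->dev_irq_level |= DEV_PMU_FLAG;
}

int main(void)
{
	struct toy_pmu pmu = { 0 };

	update_state(&pmu, true);	/* overflow asserted        */
	if (should_notify_user(&pmu))
		update_run(&pmu);	/* userspace sees the level */
	update_state(&pmu, true);	/* no change, no action     */
	update_state(&pmu, false);	/* line dropped             */
	return 0;
}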
440 * kvm_pmu_flush_hwstate - flush pmu state to cpu
452 * kvm_pmu_sync_hwstate - sync pmu state from cpu
498 type &= kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_counter_increment()
527 val = (-counter) & GENMASK(63, 0); in compute_period()
529 val = (-counter) & GENMASK(31, 0); in compute_period()
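compute_period() turns the current counter value into the number of increments left before the counter wraps, which becomes the perf sample period. The same arithmetic in standalone form, with the overflow width passed in directly instead of derived from PMCR bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Increments remaining until the counter wraps, for a 32- or 64-bit
 * overflow width; unsigned negation gives 2^64 - counter, and the
 * 32-bit case keeps only the low word of that.
 */
static uint64_t compute_period(uint64_t counter, bool wraps_at_64bit)
{
	if (wraps_at_64bit)
		return -counter;
	return (-counter) & 0xffffffffULL;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)compute_period(0xfffffff0ULL, false));
	printf("%#llx\n", (unsigned long long)compute_period(0xffffffffffffff00ULL, true));
	return 0;
}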
541 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_pmu_perf_overflow()
542 struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu); in kvm_pmu_perf_overflow()
544 int idx = pmc->idx; in kvm_pmu_perf_overflow()
547 cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE); in kvm_pmu_perf_overflow()
553 period = compute_period(pmc, local64_read(&perf_event->count)); in kvm_pmu_perf_overflow()
555 local64_set(&perf_event->hw.period_left, 0); in kvm_pmu_perf_overflow()
556 perf_event->attr.sample_period = period; in kvm_pmu_perf_overflow()
557 perf_event->hw.sample_period = period; in kvm_pmu_perf_overflow()
571 irq_work_queue(&vcpu->arch.pmu.overflow_work); in kvm_pmu_perf_overflow()
574 cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD); in kvm_pmu_perf_overflow()
578 * kvm_pmu_software_increment - do software increment
588 * kvm_pmu_handle_pmcr - handle PMCR register
597 if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5)) in kvm_pmu_handle_pmcr()
627 if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx))) in kvm_pmu_counter_is_enabled()
630 if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx)) in kvm_pmu_counter_is_enabled()
659 if (!kvm_pmu_counter_is_hyp(vcpu, pmc->idx) && (mdcr & MDCR_EL2_HPMD)) in kvm_pmc_counts_at_el2()
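The enable check above requires the counter's PMCNTENSET_EL0 bit and then gates hypervisor-range counters on a different global enable than regular ones. A rough model of that two-level check; the two global enables are plain booleans here, not the real MDCR_EL2/PMCR_EL0 bits, and counter_is_enabled() is a name local to this sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool counter_is_enabled(uint64_t pmcntenset, unsigned int idx,
			       bool is_hyp_counter, bool global_enable,
			       bool hyp_enable)
{
	if (!(pmcntenset & (1ULL << idx)))
		return false;

	return is_hyp_counter ? hyp_enable : global_enable;
}

int main(void)
{
	/* Counter 2 set in PMCNTENSET, global enable on, hyp enable off. */
	printf("counter 2 (regular):   %d\n",
	       counter_is_enabled(1ULL << 2, 2, false, true, false));
	printf("counter 2 (hyp-owned): %d\n",
	       counter_is_enabled(1ULL << 2, 2, true, true, false));
	return 0;
}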
667 struct arm_pmu *pmu = kvm->arch.arm_pmu; in kvm_map_pmu_event()
673 if (unlikely(pmu->map_pmuv3_event)) in kvm_map_pmu_event()
674 return pmu->map_pmuv3_event(eventsel); in kvm_map_pmu_event()
680 * kvm_pmu_create_perf_event - create a perf event for a counter
686 struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu; in kvm_pmu_create_perf_event()
695 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_create_perf_event()
698 eventsel = evtreg & kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_create_perf_event()
712 if (vcpu->kvm->arch.pmu_filter && in kvm_pmu_create_perf_event()
713 !test_bit(eventsel, vcpu->kvm->arch.pmu_filter)) in kvm_pmu_create_perf_event()
720 eventsel = kvm_map_pmu_event(vcpu->kvm, eventsel); in kvm_pmu_create_perf_event()
725 attr.type = arm_pmu->pmu.type; in kvm_pmu_create_perf_event()
753 event = perf_event_create_kernel_counter(&attr, -1, current, in kvm_pmu_create_perf_event()
762 pmc->perf_event = event; in kvm_pmu_create_perf_event()
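kvm_pmu_create_perf_event() fills in a struct perf_event_attr (type from the backing arm_pmu, config from the guest's event selector, plus exclusion flags) and opens an in-kernel counter. As a loose userspace analogue rather than the KVM code path, the same attr structure drives the perf_event_open() syscall; counting CPU cycles for the current thread looks roughly like this, with error handling kept minimal:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return (int)syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;		/* KVM uses the arm_pmu's type     */
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* KVM uses the guest's eventsel   */
	attr.disabled = 1;
	attr.exclude_kernel = 1;		/* exclusion flags, as in the attr above */

	fd = perf_event_open(&attr, 0 /* this thread */, -1 /* any cpu */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;				/* something to count */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}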
766 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
781 reg = counter_index_to_evtreg(pmc->idx); in kvm_pmu_set_counter_event_type()
782 __vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm))); in kvm_pmu_set_counter_event_type()
804 entry->arm_pmu = pmu; in kvm_host_pmu_init()
805 list_add_tail(&entry->entry, &arm_pmus); in kvm_host_pmu_init()
835 pmu = entry->arm_pmu; in kvm_pmu_probe_armpmu()
837 if (cpumask_test_cpu(cpu, &pmu->supported_cpus)) in kvm_pmu_probe_armpmu()
848 bitmap_to_arr32(lo, pmu->pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); in __compute_pmceid()
849 bitmap_to_arr32(hi, pmu->pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); in __compute_pmceid()
873 val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) | in compute_pmceid1()
874 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) | in compute_pmceid1()
875 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32)); in compute_pmceid1()
881 struct arm_pmu *cpu_pmu = vcpu->kvm->arch.arm_pmu; in kvm_pmu_get_pmceid()
882 unsigned long *bmap = vcpu->kvm->arch.pmu_filter; in kvm_pmu_get_pmceid()
897 nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; in kvm_pmu_get_pmceid()
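The PMCEID handling above advertises the common events supported by the backing PMU as 32-bit register halves, hides a few events unconditionally, and applies the per-VM event filter when one is installed. A deliberately simplified model that ignores the extended-event ranges and uses made-up event numbers and bitmaps:

#include <stdint.h>
#include <stdio.h>

static uint32_t pmceid_word(uint64_t supported, const uint64_t *filter,
			    int high_word)
{
	uint64_t events = supported;

	if (filter)
		events &= *filter;	/* only advertise filtered-in events */

	return high_word ? (uint32_t)(events >> 32) : (uint32_t)events;
}

int main(void)
{
	uint64_t supported = 0x00000000000ff0ffULL;	/* made-up event bitmap  */
	uint64_t filter    = 0x00000000000000ffULL;	/* userspace allows 0-7  */

	/* Hide two made-up event numbers regardless of hardware support. */
	supported &= ~((1ULL << 13) | (1ULL << 14));

	printf("PMCEID low word, no filter:   %#x\n",
	       pmceid_word(supported, NULL, 0));
	printf("PMCEID low word, with filter: %#x\n",
	       pmceid_word(supported, &filter, 0));
	return 0;
}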
926 if (!vcpu->arch.pmu.created) in kvm_arm_pmu_v3_enable()
927 return -EINVAL; in kvm_arm_pmu_v3_enable()
931 * properly configured interrupt number and using an in-kernel in kvm_arm_pmu_v3_enable()
932 * irqchip, or to not have an in-kernel GIC and not set an IRQ. in kvm_arm_pmu_v3_enable()
934 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_enable()
935 int irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_enable()
937 * If we are using an in-kernel vgic, at this point we know in kvm_arm_pmu_v3_enable()
942 if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq)) in kvm_arm_pmu_v3_enable()
943 return -EINVAL; in kvm_arm_pmu_v3_enable()
945 return -EINVAL; in kvm_arm_pmu_v3_enable()
953 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_init()
957 * If using the PMU with an in-kernel virtual GIC in kvm_arm_pmu_v3_init()
961 if (!vgic_initialized(vcpu->kvm)) in kvm_arm_pmu_v3_init()
962 return -ENODEV; in kvm_arm_pmu_v3_init()
965 return -ENXIO; in kvm_arm_pmu_v3_init()
967 ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num, in kvm_arm_pmu_v3_init()
968 &vcpu->arch.pmu); in kvm_arm_pmu_v3_init()
973 init_irq_work(&vcpu->arch.pmu.overflow_work, in kvm_arm_pmu_v3_init()
976 vcpu->arch.pmu.created = true; in kvm_arm_pmu_v3_init()
995 if (vcpu->arch.pmu.irq_num != irq) in pmu_irq_is_valid()
998 if (vcpu->arch.pmu.irq_num == irq) in pmu_irq_is_valid()
1007 * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
1012 struct arm_pmu *arm_pmu = kvm->arch.arm_pmu; in kvm_arm_pmu_get_max_counters()
1016 * event, though the same may not be true of non-PMUv3 hardware. in kvm_arm_pmu_get_max_counters()
1022 * The arm_pmu->cntr_mask considers the fixed counter(s) as well. in kvm_arm_pmu_get_max_counters()
1023 * Ignore those and return only the general-purpose counters. in kvm_arm_pmu_get_max_counters()
1025 return bitmap_weight(arm_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS); in kvm_arm_pmu_get_max_counters()
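Counting the implemented general-purpose counters comes down to a population count over the counter mask with the fixed cycle counter excluded. A standalone equivalent, taking 31 general-purpose counter slots and a made-up counter mask:

#include <stdint.h>
#include <stdio.h>

#define MAX_GENERAL_COUNTERS 31		/* bit 31 is the cycle counter */

static unsigned int nr_general_counters(uint64_t cntr_mask)
{
	/* Keep only bits 0..30, then count them. */
	return __builtin_popcountll(cntr_mask &
				    ((1ULL << MAX_GENERAL_COUNTERS) - 1));
}

int main(void)
{
	uint64_t cntr_mask = (1ULL << 31) | 0x3fULL;	/* cycle counter + 6 events */

	printf("general-purpose counters: %u\n", nr_general_counters(cntr_mask));
	return 0;
}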
1030 kvm->arch.nr_pmu_counters = nr; in kvm_arm_set_nr_counters()
1033 if (test_bit(KVM_ARM_VCPU_HAS_EL2, kvm->arch.vcpu_features)) { in kvm_arm_set_nr_counters()
1040 val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters); in kvm_arm_set_nr_counters()
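FIELD_PREP() packs a value into the register field described by a mask. A minimal local stand-in for the helper from linux/bitfield.h (without its compile-time checks), applied to an illustrative field mask rather than the architectural MDCR_EL2.HPMN layout:

#include <stdint.h>
#include <stdio.h>

#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define FIELD_GET(mask, reg) \
	(((uint64_t)(reg) & (mask)) >> __builtin_ctzll(mask))

#define EXAMPLE_HPMN_MASK 0x1fULL	/* pretend the field occupies bits [4:0] */

int main(void)
{
	uint64_t mdcr = 0;

	mdcr &= ~EXAMPLE_HPMN_MASK;			/* clear the old field */
	mdcr |= FIELD_PREP(EXAMPLE_HPMN_MASK, 6);	/* program HPMN = 6    */

	printf("mdcr = %#llx, HPMN = %llu\n",
	       (unsigned long long)mdcr,
	       (unsigned long long)FIELD_GET(EXAMPLE_HPMN_MASK, mdcr));
	return 0;
}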
1048 lockdep_assert_held(&kvm->arch.config_lock); in kvm_arm_set_pmu()
1050 kvm->arch.arm_pmu = arm_pmu; in kvm_arm_set_pmu()
1055 * kvm_arm_set_default_pmu - No PMU set, get the default one.
1071 return -ENODEV; in kvm_arm_set_default_pmu()
1079 struct kvm *kvm = vcpu->kvm; in kvm_arm_pmu_v3_set_pmu()
1082 int ret = -ENXIO; in kvm_arm_pmu_v3_set_pmu()
1084 lockdep_assert_held(&kvm->arch.config_lock); in kvm_arm_pmu_v3_set_pmu()
1088 arm_pmu = entry->arm_pmu; in kvm_arm_pmu_v3_set_pmu()
1089 if (arm_pmu->pmu.type == pmu_id) { in kvm_arm_pmu_v3_set_pmu()
1091 (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) { in kvm_arm_pmu_v3_set_pmu()
1092 ret = -EBUSY; in kvm_arm_pmu_v3_set_pmu()
1097 cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus); in kvm_arm_pmu_v3_set_pmu()
1109 struct kvm *kvm = vcpu->kvm; in kvm_arm_pmu_v3_set_nr_counters()
1111 if (!kvm->arch.arm_pmu) in kvm_arm_pmu_v3_set_nr_counters()
1112 return -EINVAL; in kvm_arm_pmu_v3_set_nr_counters()
1115 return -EINVAL; in kvm_arm_pmu_v3_set_nr_counters()
1123 struct kvm *kvm = vcpu->kvm; in kvm_arm_pmu_v3_set_attr()
1125 lockdep_assert_held(&kvm->arch.config_lock); in kvm_arm_pmu_v3_set_attr()
1128 return -ENODEV; in kvm_arm_pmu_v3_set_attr()
1130 if (vcpu->arch.pmu.created) in kvm_arm_pmu_v3_set_attr()
1131 return -EBUSY; in kvm_arm_pmu_v3_set_attr()
1133 switch (attr->attr) { in kvm_arm_pmu_v3_set_attr()
1135 int __user *uaddr = (int __user *)(long)attr->addr; in kvm_arm_pmu_v3_set_attr()
1139 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
1142 return -EFAULT; in kvm_arm_pmu_v3_set_attr()
1146 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
1149 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
1152 return -EBUSY; in kvm_arm_pmu_v3_set_attr()
1155 vcpu->arch.pmu.irq_num = irq; in kvm_arm_pmu_v3_set_attr()
1171 uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr; in kvm_arm_pmu_v3_set_attr()
1174 return -EFAULT; in kvm_arm_pmu_v3_set_attr()
1179 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
1182 return -EBUSY; in kvm_arm_pmu_v3_set_attr()
1184 if (!kvm->arch.pmu_filter) { in kvm_arm_pmu_v3_set_attr()
1185 kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT); in kvm_arm_pmu_v3_set_attr()
1186 if (!kvm->arch.pmu_filter) in kvm_arm_pmu_v3_set_attr()
1187 return -ENOMEM; in kvm_arm_pmu_v3_set_attr()
1196 bitmap_zero(kvm->arch.pmu_filter, nr_events); in kvm_arm_pmu_v3_set_attr()
1198 bitmap_fill(kvm->arch.pmu_filter, nr_events); in kvm_arm_pmu_v3_set_attr()
1202 bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents); in kvm_arm_pmu_v3_set_attr()
1204 bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents); in kvm_arm_pmu_v3_set_attr()
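The filter handling above allocates the bitmap lazily, lets the first installed filter decide the default (an allow filter starts from an empty bitmap, a deny filter from a full one), and then sets or clears the requested event range. The same bookkeeping over a flat 64-event bitmap; struct toy_filter and its helpers are inventions of this sketch, not the kernel's bitmap API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_EVENTS 64

struct toy_filter {
	bool installed;
	uint8_t bits[NR_EVENTS / 8];
};

static void set_range(uint8_t *bits, unsigned int base, unsigned int n, bool allow)
{
	for (unsigned int i = base; i < base + n && i < NR_EVENTS; i++) {
		if (allow)
			bits[i / 8] |= 1u << (i % 8);
		else
			bits[i / 8] &= ~(1u << (i % 8));
	}
}

static void apply_filter(struct toy_filter *f, unsigned int base,
			 unsigned int n, bool allow)
{
	if (!f->installed) {
		/* The first filter decides the default for everything else. */
		memset(f->bits, allow ? 0x00 : 0xff, sizeof(f->bits));
		f->installed = true;
	}
	set_range(f->bits, base, n, allow);
}

static bool event_allowed(const struct toy_filter *f, unsigned int ev)
{
	if (!f->installed)
		return true;		/* no filter: allow everything */
	return f->bits[ev / 8] & (1u << (ev % 8));
}

int main(void)
{
	struct toy_filter f = { 0 };

	apply_filter(&f, 8, 4, true);	/* allow events 8-11 only */
	printf("event 9:  %s\n", event_allowed(&f, 9) ? "allowed" : "denied");
	printf("event 20: %s\n", event_allowed(&f, 20) ? "allowed" : "denied");
	return 0;
}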
1209 int __user *uaddr = (int __user *)(long)attr->addr; in kvm_arm_pmu_v3_set_attr()
1213 return -EFAULT; in kvm_arm_pmu_v3_set_attr()
1218 unsigned int __user *uaddr = (unsigned int __user *)(long)attr->addr; in kvm_arm_pmu_v3_set_attr()
1222 return -EFAULT; in kvm_arm_pmu_v3_set_attr()
1230 return -ENXIO; in kvm_arm_pmu_v3_set_attr()
1235 switch (attr->attr) { in kvm_arm_pmu_v3_get_attr()
1237 int __user *uaddr = (int __user *)(long)attr->addr; in kvm_arm_pmu_v3_get_attr()
1240 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_pmu_v3_get_attr()
1241 return -EINVAL; in kvm_arm_pmu_v3_get_attr()
1244 return -ENODEV; in kvm_arm_pmu_v3_get_attr()
1247 return -ENXIO; in kvm_arm_pmu_v3_get_attr()
1249 irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_get_attr()
1254 return -ENXIO; in kvm_arm_pmu_v3_get_attr()
1259 switch (attr->attr) { in kvm_arm_pmu_v3_has_attr()
1269 return -ENXIO; in kvm_arm_pmu_v3_has_attr()
1297 * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
1303 u64 n = vcpu->kvm->arch.nr_pmu_counters; in kvm_vcpu_read_pmcr()