Lines Matching full:pmu

185 	if (type == event->pmu->type)  in armpmu_map_event()
202 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_event_set_period()
244 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_event_update()
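The matches at 202 and 244 are the two halves of the usual perf counter bookkeeping: armpmu_event_set_period() programs the next overflow period and armpmu_event_update() folds the hardware delta into event->count. A minimal sketch of the update side, assuming the arm_pmu callbacks from <linux/perf/arm_pmu.h>; upstream derives the maximum period from the counter width per event, hard-wired here to 32 bits purely for illustration:

	#include <linux/perf/arm_pmu.h>
	#include <linux/bits.h>

	static u64 armpmu_event_update_sketch(struct perf_event *event)
	{
		struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
		struct hw_perf_event *hwc = &event->hw;
		/* Assumption: 32-bit counter; upstream computes this per event. */
		const u64 max_period = GENMASK_ULL(31, 0);
		u64 delta, prev_raw_count, new_raw_count;

	again:
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = armpmu->read_counter(event);

		/* Retry if an overflow IRQ updated prev_count under us. */
		if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				    new_raw_count) != prev_raw_count)
			goto again;

		delta = (new_raw_count - prev_raw_count) & max_period;

		local64_add(delta, &event->count);
		local64_sub(delta, &hwc->period_left);

		return new_raw_count;
	}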
274 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_stop()
278 	 * ARM PMU always has to update the counter, so ignore in armpmu_stop()
290 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_start()
294 	 * ARM PMU always has to reprogram the period, so ignore in armpmu_start()
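The comments at 278 and 294 refer to the perf core flags PERF_EF_UPDATE and PERF_EF_RELOAD: because this driver must always fold the counter on stop and always reprogram the period on start, both flags are effectively ignored. A sketch of the pair, following the structure the matched lines come from:

	static void armpmu_stop_sketch(struct perf_event *event, int flags)
	{
		struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
		struct hw_perf_event *hwc = &event->hw;

		/* Always update the counter, so PERF_EF_UPDATE is moot. */
		if (!(hwc->state & PERF_HES_STOPPED)) {
			armpmu->disable(event);
			armpmu_event_update(event);
			hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
		}
	}

	static void armpmu_start_sketch(struct perf_event *event, int flags)
	{
		struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
		struct hw_perf_event *hwc = &event->hw;

		/* Always reprogram the period, so PERF_EF_RELOAD is moot. */
		hwc->state = 0;
		armpmu_event_set_period(event);
		armpmu->enable(event);
	}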
315 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_del()
324 perf_sched_cb_dec(event->pmu); in armpmu_del()
337 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_add()
356 perf_sched_cb_inc(event->pmu); in armpmu_add()
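The perf_sched_cb_dec()/perf_sched_cb_inc() calls at 324 and 356 adjust the perf core's context-switch callback refcount as an event leaves or joins the hardware. A hedged sketch of the del side; the has_branch_stack() gating is an assumption based on recent trees, where the callback exists to save and restore branch-stack state:

	static void armpmu_del_sketch(struct perf_event *event, int flags)
	{
		struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
		struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
		struct hw_perf_event *hwc = &event->hw;
		int idx = hwc->idx;

		armpmu_stop(event, PERF_EF_UPDATE);
		hw_events->events[idx] = NULL;
		armpmu->clear_event_idx(hw_events, event);

		/* Assumption: callback refcount is tied to branch-stack use. */
		if (has_branch_stack(event))
			perf_sched_cb_dec(event->pmu);

		perf_event_update_userpage(event);
		hwc->idx = -1;	/* release the counter */
	}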
373 validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, in validate_event() argument
383 * core perf code won't check that the pmu->ctx == leader->ctx in validate_event()
384 * until after pmu->event_init(event). in validate_event()
386 if (event->pmu != pmu) in validate_event()
395 armpmu = to_arm_pmu(event->pmu); in validate_event()
406 * Initialise the fake PMU. We only need to populate the in validate_group()
411 if (!validate_event(event->pmu, &fake_pmu, leader)) in validate_group()
418 if (!validate_event(event->pmu, &fake_pmu, sibling)) in validate_group()
422 if (!validate_event(event->pmu, &fake_pmu, event)) in validate_group()
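Lines 373-422 validate an event group against a fake PMU: a scratch pmu_hw_events whose used_mask gets populated by repeated get_event_idx() calls, so a group that could never be scheduled together is rejected up front, at event_init time. A sketch under those assumptions:

	static bool validate_event_sketch(struct pmu *pmu,
					  struct pmu_hw_events *hw_events,
					  struct perf_event *event)
	{
		struct arm_pmu *armpmu;

		if (is_software_event(event))
			return true;

		/*
		 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI):
		 * the core won't compare contexts until after event_init().
		 */
		if (event->pmu != pmu)
			return false;

		armpmu = to_arm_pmu(event->pmu);
		return armpmu->get_event_idx(hw_events, event) >= 0;
	}

	static int validate_group_sketch(struct perf_event *event)
	{
		struct perf_event *sibling, *leader = event->group_leader;
		struct pmu_hw_events fake_pmu;

		/* Only used_mask matters for validation. */
		memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

		if (!validate_event_sketch(event->pmu, &fake_pmu, leader))
			return -EINVAL;

		for_each_sibling_event(sibling, leader) {
			if (!validate_event_sketch(event->pmu, &fake_pmu, sibling))
				return -EINVAL;
		}

		if (!validate_event_sketch(event->pmu, &fake_pmu, event))
			return -EINVAL;

		return 0;
	}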
455 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in __hw_perf_event_init()
471 	 * yet. For SMP systems, each core has its own PMU so we can't do any in __hw_perf_event_init()
510 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_event_init()
514 * that which this PMU handles. Process-following events (where in armpmu_event_init()
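The comment at 514 describes the CPU filter in armpmu_event_init(): CPU-bound events for CPUs outside supported_cpus are rejected immediately, while process-following events (event->cpu == -1) can migrate between CPUs and are filtered later, in armpmu_add(). A sketch:

	static int armpmu_event_init_sketch(struct perf_event *event)
	{
		struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

		/* CPU-bound event on a CPU this PMU doesn't cover? */
		if (event->cpu != -1 &&
		    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
			return -ENOENT;

		/* Process-following events are instead rejected in
		 * armpmu_add() if scheduled on a different class of CPU. */
		return __hw_perf_event_init(event);
	}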
529 static void armpmu_enable(struct pmu *pmu) in armpmu_enable() argument
531 struct arm_pmu *armpmu = to_arm_pmu(pmu); in armpmu_enable()
543 static void armpmu_disable(struct pmu *pmu) in armpmu_disable() argument
545 struct arm_pmu *armpmu = to_arm_pmu(pmu); in armpmu_disable()
559 static bool armpmu_filter(struct pmu *pmu, int cpu) in armpmu_filter() argument
561 struct arm_pmu *armpmu = to_arm_pmu(pmu); in armpmu_filter()
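armpmu_enable()/armpmu_disable() (529-545) gate the whole PMU around event scheduling, and armpmu_filter() (559) lets the perf core skip CPUs the PMU doesn't cover. A sketch of the trio; the bitmap_empty() check avoids starting hardware with no events programmed:

	static void armpmu_enable_sketch(struct pmu *pmu)
	{
		struct arm_pmu *armpmu = to_arm_pmu(pmu);
		struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
		bool enabled = !bitmap_empty(hw_events->used_mask,
					     ARMPMU_MAX_HWEVENTS);

		/* For task-bound events we may be called on other CPUs. */
		if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
			return;

		if (enabled)
			armpmu->start(armpmu);
	}

	static void armpmu_disable_sketch(struct pmu *pmu)
	{
		struct arm_pmu *armpmu = to_arm_pmu(pmu);

		if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
			return;

		armpmu->stop(armpmu);
	}

	static bool armpmu_filter_sketch(struct pmu *pmu, int cpu)
	{
		struct arm_pmu *armpmu = to_arm_pmu(pmu);

		/* true = "filter this PMU out" on the given CPU. */
		return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
	}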
649 err = request_nmi(irq, handler, irq_flags, "arm-pmu", in armpmu_request_irq()
654 err = request_irq(irq, handler, irq_flags, "arm-pmu", in armpmu_request_irq()
662 err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu); in armpmu_request_irq()
666 err = request_percpu_irq(irq, handler, "arm-pmu", in armpmu_request_irq()
689 pr_err("unable to request IRQ%d for ARM PMU counters\n", irq); in armpmu_request_irq()
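The request calls at 649-666 form a fallback ladder: prefer an NMI (for accurate interrupt profiling), fall back to a normal IRQ, and use the per-CPU variants when the interrupt is a PPI shared by all CPUs. A sketch of the ladder, assuming the per-CPU cpu_armpmu pointer used as dev_id upstream; the NMI path additionally depends on the irqchip supporting pseudo-NMIs:

	#include <linux/interrupt.h>
	#include <linux/irq.h>

	static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);

	static int armpmu_request_irq_sketch(int irq, irq_handler_t handler,
					     unsigned long irq_flags, int cpu)
	{
		int err;

		if (irq_is_percpu_devid(irq)) {
			/* PPI: one line shared by all CPUs, per-CPU dev_id. */
			err = request_percpu_nmi(irq, handler, "arm-pmu",
						 &cpu_armpmu);
			if (err)
				err = request_percpu_irq(irq, handler, "arm-pmu",
							 &cpu_armpmu);
		} else {
			/* SPI: a dedicated line for this CPU. */
			err = request_nmi(irq, handler, irq_flags, "arm-pmu",
					  per_cpu_ptr(&cpu_armpmu, cpu));
			if (err)
				err = request_irq(irq, handler, irq_flags,
						  "arm-pmu",
						  per_cpu_ptr(&cpu_armpmu, cpu));
		}

		if (err)
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);

		return err;
	}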
693 static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu) in armpmu_get_cpu_irq() argument
695 struct pmu_hw_events __percpu *hw_events = pmu->hw_events; in armpmu_get_cpu_irq()
705 * PMU hardware loses all context when a CPU goes offline.
707 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
712 struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node); in arm_perf_starting_cpu() local
715 if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) in arm_perf_starting_cpu()
717 if (pmu->reset) in arm_perf_starting_cpu()
718 pmu->reset(pmu); in arm_perf_starting_cpu()
720 per_cpu(cpu_armpmu, cpu) = pmu; in arm_perf_starting_cpu()
722 irq = armpmu_get_cpu_irq(pmu, cpu); in arm_perf_starting_cpu()
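The comment at 705-707 explains why this hotplug callback exists: counter state is architecturally UNKNOWN out of reset and lost entirely when a CPU goes offline, so every onlining CPU must reset its PMU before use. A sketch of the starting callback, matching the flow at 712-722 and reusing the cpu_armpmu per-CPU pointer from the IRQ sketch above:

	static int arm_perf_starting_cpu_sketch(unsigned int cpu,
						struct hlist_node *node)
	{
		struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
		int irq;

		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
			return 0;

		/* Counter state is UNKNOWN at reset: always reset explicitly. */
		if (pmu->reset)
			pmu->reset(pmu);

		per_cpu(cpu_armpmu, cpu) = pmu;

		/* Re-arm the PMU interrupt on this CPU (PPI case shown). */
		irq = armpmu_get_cpu_irq(pmu, cpu);
		if (irq)
			enable_percpu_irq(irq, IRQ_TYPE_NONE);

		return 0;
	}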
731 struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node); in arm_perf_teardown_cpu() local
734 if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) in arm_perf_teardown_cpu()
737 irq = armpmu_get_cpu_irq(pmu, cpu); in arm_perf_teardown_cpu()
789 * Always reset the PMU registers on power-up even if in cpu_pm_pmu_notify()
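cpu_pm_pmu_notify() (789) is the CPU PM notifier: low-power entry can clobber PMU registers, so the PMU is stopped on CPU_PM_ENTER and unconditionally reset on exit, even if no events were running. A simplified, hedged sketch; upstream additionally stops and restarts each active event individually:

	#include <linux/cpu_pm.h>

	static int cpu_pm_pmu_notify_sketch(struct notifier_block *b,
					    unsigned long cmd, void *v)
	{
		struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
		struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
		bool enabled = !bitmap_empty(hw_events->used_mask,
					     ARMPMU_MAX_HWEVENTS);

		if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
			return NOTIFY_DONE;

		switch (cmd) {
		case CPU_PM_ENTER:
			if (enabled)
				armpmu->stop(armpmu);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/* Always reset on power-up, even with no events running. */
			if (armpmu->reset)
				armpmu->reset(armpmu);
			if (enabled)
				armpmu->start(armpmu);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}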
861 struct arm_pmu *pmu; in armpmu_alloc() local
864 pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); in armpmu_alloc()
865 if (!pmu) in armpmu_alloc()
868 pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, GFP_KERNEL); in armpmu_alloc()
869 if (!pmu->hw_events) { in armpmu_alloc()
870 pr_info("failed to allocate per-cpu PMU data.\n"); in armpmu_alloc()
874 pmu->pmu = (struct pmu) { in armpmu_alloc()
884 .attr_groups = pmu->attr_groups, in armpmu_alloc()
886 * This is a CPU PMU potentially in a heterogeneous in armpmu_alloc()
890 * specific PMU. in armpmu_alloc()
896 pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] = in armpmu_alloc()
902 events = per_cpu_ptr(pmu->hw_events, cpu); in armpmu_alloc()
903 events->percpu_pmu = pmu; in armpmu_alloc()
906 return pmu; in armpmu_alloc()
909 kfree(pmu); in armpmu_alloc()
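armpmu_alloc() (861-909) builds the driver-side object: a zeroed arm_pmu, a per-CPU pmu_hw_events array whose entries point back at their PMU, and a struct pmu wired to the armpmu_* callbacks above. A condensed sketch; the capability flag reflects the heterogeneous-CPU comment at 886-890 and is how events on a big.LITTLE system get opened against the right PMU instance:

	static struct arm_pmu *armpmu_alloc_sketch(void)
	{
		struct arm_pmu *pmu;
		int cpu;

		pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
		if (!pmu)
			return NULL;

		pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events,
						  GFP_KERNEL);
		if (!pmu->hw_events) {
			pr_info("failed to allocate per-cpu PMU data.\n");
			kfree(pmu);
			return NULL;
		}

		pmu->pmu = (struct pmu) {
			.pmu_enable	= armpmu_enable,
			.pmu_disable	= armpmu_disable,
			.event_init	= armpmu_event_init,
			.add		= armpmu_add,
			.del		= armpmu_del,
			.start		= armpmu_start,
			.stop		= armpmu_stop,
			.read		= armpmu_read,
			.filter		= armpmu_filter,
			.attr_groups	= pmu->attr_groups,
			/* Heterogeneous (big.LITTLE) systems: open PMU-specific
			 * events by their own PMU. */
			.capabilities	= PERF_PMU_CAP_EXTENDED_HW_TYPE,
		};

		/* Each per-CPU hw_events carries a back-pointer to its PMU. */
		for_each_possible_cpu(cpu) {
			struct pmu_hw_events *events;

			events = per_cpu_ptr(pmu->hw_events, cpu);
			events->percpu_pmu = pmu;
		}

		return pmu;
	}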
914 void armpmu_free(struct arm_pmu *pmu) in armpmu_free() argument
916 free_percpu(pmu->hw_events); in armpmu_free()
917 kfree(pmu); in armpmu_free()
920 int armpmu_register(struct arm_pmu *pmu) in armpmu_register() argument
924 ret = cpu_pmu_init(pmu); in armpmu_register()
928 if (!pmu->set_event_filter) in armpmu_register()
929 pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; in armpmu_register()
931 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); in armpmu_register()
935 pr_info("enabled with %s PMU driver, %d (%*pb) counters available%s\n", in armpmu_register()
936 pmu->name, bitmap_weight(pmu->cntr_mask, ARMPMU_MAX_HWEVENTS), in armpmu_register()
937 ARMPMU_MAX_HWEVENTS, &pmu->cntr_mask, in armpmu_register()
940 kvm_host_pmu_init(pmu); in armpmu_register()
945 cpu_pmu_destroy(pmu); in armpmu_register()
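armpmu_register() (920-945) is the final step: initialise per-CPU state, advertise PERF_PMU_CAP_NO_EXCLUDE when the driver has no set_event_filter hook (so the core rejects exclude_* attributes rather than silently mis-counting), register with the perf core, and tell KVM about the host PMU. A sketch matching the matched lines:

	int armpmu_register_sketch(struct arm_pmu *pmu)
	{
		int ret;

		ret = cpu_pmu_init(pmu);
		if (ret)
			return ret;

		/* No HW filter hook: the core must reject exclude_* flags. */
		if (!pmu->set_event_filter)
			pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;

		ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
		if (ret) {
			cpu_pmu_destroy(pmu);
			return ret;
		}

		/* Let KVM know about the host PMU for guest PMU emulation. */
		kvm_host_pmu_init(pmu);

		return 0;
	}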
954 "perf/arm/pmu:starting", in arm_pmu_hp_init()
958 pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n", in arm_pmu_hp_init()
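The hotplug state at 954 is registered once, with cpuhp_setup_state_multi(), so each arm_pmu instance can later hook its hlist node into the "perf/arm/pmu:starting" state. A sketch using the multi-instance API those lines imply:

	#include <linux/cpuhotplug.h>

	static int __init arm_pmu_hp_init_sketch(void)
	{
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
					      "perf/arm/pmu:starting",
					      arm_perf_starting_cpu,
					      arm_perf_teardown_cpu);
		if (ret)
			pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
			       ret);
		return ret;
	}
	subsys_initcall(arm_pmu_hp_init_sketch);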