Lines matching refs:l2cache_pmu
109 struct l2cache_pmu { struct
138 struct l2cache_pmu *l2cache_pmu; argument
150 #define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))
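
The to_l2cache_pmu() accessor is the standard container_of() idiom: the driver embeds the generic struct pmu inside its private state, so any perf callback handed a struct pmu * can recover the wrapper. A minimal sketch of that embedding, using only the fields this listing shows being accessed (field types are best-effort assumptions):

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/list.h>

struct cluster_pmu;                     /* one instance per L2 cluster */

struct l2cache_pmu {
        struct hlist_node node;         /* hotplug multi-instance linkage */
        u32 num_pmus;                   /* clusters found during probe */
        struct pmu pmu;                 /* embedded generic PMU */
        int num_counters;
        cpumask_t cpumask;              /* one managing CPU per cluster */
        struct platform_device *pdev;
        struct cluster_pmu * __percpu *pmu_cluster;
        struct list_head clusters;
};

#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))

The sketches further down assume these same headers and fields.
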
164 struct l2cache_pmu *l2cache_pmu, int cpu) in get_cluster_pmu() argument
166 return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu); in get_cluster_pmu()
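
get_cluster_pmu() is a one-line per-CPU lookup: pmu_cluster is a per-CPU pointer (allocated with devm_alloc_percpu() in probe, further down), and each CPU's slot holds the cluster_pmu that owns that CPU:

static struct cluster_pmu *get_cluster_pmu(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        /* NULL until l2_cache_associate_cpu_with_cluster() fills the slot */
        return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
}
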
344 int num_ctrs = cluster->l2cache_pmu->num_counters - 1; in l2_cache_get_event_idx()
388 int num_counters = cluster->l2cache_pmu->num_counters; in l2_cache_handle_irq()
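
Both call sites above size their loops from l2cache_pmu->num_counters, with the highest index reserved for a dedicated cycle counter, which is why the event-index allocator searches num_counters - 1 slots. A hedged sketch of that allocator; the used_counters bitmap is an assumption modeled on common perf-driver practice:

static int l2_cache_get_event_idx(struct cluster_pmu *cluster)
{
        /* Top index is the dedicated cycle counter; search the rest. */
        int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
        int idx;

        idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
        if (idx == num_ctrs)
                return -EAGAIN;         /* all general counters busy */

        set_bit(idx, cluster->used_counters);
        return idx;
}
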
443 struct l2cache_pmu *l2cache_pmu; in l2_cache_event_init() local
448 l2cache_pmu = to_l2cache_pmu(event->pmu); in l2_cache_event_init()
451 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
457 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
465 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
474 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
482 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
488 cluster = get_cluster_pmu(l2cache_pmu, event->cpu); in l2_cache_event_init()
491 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
499 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
509 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
521 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
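
The dense run of dev_dbg_ratelimited() calls above is l2_cache_event_init() rejecting unsupported configurations; ratelimiting the debug output keeps a misbehaving perf_event_open() caller from flooding the kernel log. A sketch of the shape of these checks (the specific conditions shown are assumptions; the cluster lookup matches the get_cluster_pmu() call visible above):

static int l2_cache_event_init(struct perf_event *event)
{
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(event->pmu);
        struct cluster_pmu *cluster;

        if (is_sampling_event(event)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Sampling not supported\n");
                return -EOPNOTSUPP;
        }

        /* ... further attr and event-group checks, logged the same way ... */

        cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
        if (!cluster) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "CPU%d has no associated cluster\n",
                                    event->cpu);
                return -EINVAL;
        }
        return 0;
}
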
639 struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev)); in l2_cache_pmu_cpumask_show() local
641 return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask); in l2_cache_pmu_cpumask_show()
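
l2_cache_pmu_cpumask_show() backs the PMU's sysfs cpumask attribute: dev_get_drvdata() on the PMU's sysfs device yields the struct pmu pointer stored by the perf core, to_l2cache_pmu() recovers the wrapper, and cpumap_print_to_pagebuf() formats the set of CPUs currently fronting the clusters. Sketch (the DEVICE_ATTR wiring is assumed):

static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct l2cache_pmu *l2cache_pmu =
                to_l2cache_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
}
static DEVICE_ATTR(cpumask, 0444, l2_cache_pmu_cpumask_show, NULL);

Userspace reads this mask to know which CPU to open uncore events on; perf consults it automatically.
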
735 struct l2cache_pmu *l2cache_pmu, int cpu) in l2_cache_associate_cpu_with_cluster() argument
752 list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { in l2_cache_associate_cpu_with_cluster()
756 dev_info(&l2cache_pmu->pdev->dev, in l2_cache_associate_cpu_with_cluster()
760 *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; in l2_cache_associate_cpu_with_cluster()
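
l2_cache_associate_cpu_with_cluster() walks the clusters list built during probe, matches the CPU to its cluster, logs the association once via dev_info(), and caches the result in the per-CPU slot that get_cluster_pmu() reads. A sketch, assuming the match key is a firmware-assigned cluster id compared against an id derived from CPU topology (the real derivation may differ, e.g. reading MPIDR affinity fields):

static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        struct cluster_pmu *cluster;
        int cpu_cluster_id = topology_physical_package_id(cpu);

        list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
                if (cluster->cluster_id != cpu_cluster_id)
                        continue;

                dev_info(&l2cache_pmu->pdev->dev,
                         "CPU%d associated with cluster %d\n",
                         cpu, cluster->cluster_id);
                *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
                return cluster;
        }
        return NULL;
}
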
770 struct l2cache_pmu *l2cache_pmu; in l2cache_pmu_online_cpu() local
772 l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); in l2cache_pmu_online_cpu()
773 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
776 cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
793 cpumask_set_cpu(cpu, &l2cache_pmu->cpumask); in l2cache_pmu_online_cpu()
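
l2cache_pmu_online_cpu() is the CPUHP multi-instance callback: hlist_entry_safe() maps the registered node back to its l2cache_pmu, a CPU seen for the first time is associated with its cluster, and if nothing currently manages that cluster the new CPU takes over and joins the exported cpumask. Sketch (the on_cpu bookkeeping field is an assumption):

static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct l2cache_pmu *l2cache_pmu;
        struct cluster_pmu *cluster;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster) {
                /* First time this CPU has come online */
                cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
                if (!cluster)
                        return 0;       /* firmware listed no cluster for it */
        }

        /* Another CPU already fronts this cluster: nothing to do */
        if (cluster->on_cpu != -1)
                return 0;

        cluster->on_cpu = cpu;
        cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
        return 0;
}
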
804 struct l2cache_pmu *l2cache_pmu; in l2cache_pmu_offline_cpu() local
808 l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); in l2cache_pmu_offline_cpu()
809 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_offline_cpu()
818 cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask); in l2cache_pmu_offline_cpu()
829 perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target); in l2cache_pmu_offline_cpu()
831 cpumask_set_cpu(target, &l2cache_pmu->cpumask); in l2cache_pmu_offline_cpu()
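
The offline callback mirrors it: if the departing CPU was fronting a cluster it leaves the cpumask, and when a sibling CPU of the same cluster is still online, perf_pmu_migrate_context() moves the active events there so counting continues uninterrupted. Sketch under the same assumptions (a cluster_cpus mask recording the cluster's CPUs is assumed):

static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct l2cache_pmu *l2cache_pmu;
        struct cluster_pmu *cluster;
        cpumask_t cluster_online_cpus;
        unsigned int target;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster || cluster->on_cpu != cpu)
                return 0;               /* this CPU wasn't managing a cluster */

        cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
        cluster->on_cpu = -1;

        cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
                    cpu_online_mask);
        target = cpumask_any_but(&cluster_online_cpus, cpu);
        if (target >= nr_cpu_ids)
                return 0;               /* whole cluster went offline */

        perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
        cluster->on_cpu = target;
        cpumask_set_cpu(target, &l2cache_pmu->cpumask);
        return 0;
}
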
841 struct l2cache_pmu *l2cache_pmu = data; in l2_cache_pmu_probe_cluster() local
865 cluster->l2cache_pmu = l2cache_pmu; in l2_cache_pmu_probe_cluster()
883 list_add(&cluster->next, &l2cache_pmu->clusters); in l2_cache_pmu_probe_cluster()
884 l2cache_pmu->num_pmus++; in l2_cache_pmu_probe_cluster()
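
l2_cache_pmu_probe_cluster() is the device_for_each_child() callback used from probe: data carries the l2cache_pmu, and each child device contributes one cluster_pmu, linked into the clusters list and counted in num_pmus. Sketch with firmware/resource parsing elided:

static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
        struct l2cache_pmu *l2cache_pmu = data;
        struct cluster_pmu *cluster;

        cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
        if (!cluster)
                return -ENOMEM;

        INIT_LIST_HEAD(&cluster->next);
        cluster->l2cache_pmu = l2cache_pmu;
        /* ... read the cluster id and per-cluster IRQ from firmware ... */

        list_add(&cluster->next, &l2cache_pmu->clusters);
        l2cache_pmu->num_pmus++;
        return 0;
}
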
892 struct l2cache_pmu *l2cache_pmu; in l2_cache_pmu_probe() local
894 l2cache_pmu = in l2_cache_pmu_probe()
895 devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL); in l2_cache_pmu_probe()
896 if (!l2cache_pmu) in l2_cache_pmu_probe()
899 INIT_LIST_HEAD(&l2cache_pmu->clusters); in l2_cache_pmu_probe()
901 platform_set_drvdata(pdev, l2cache_pmu); in l2_cache_pmu_probe()
902 l2cache_pmu->pmu = (struct pmu) { in l2_cache_pmu_probe()
919 l2cache_pmu->num_counters = get_num_counters(); in l2_cache_pmu_probe()
920 l2cache_pmu->pdev = pdev; in l2_cache_pmu_probe()
921 l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev, in l2_cache_pmu_probe()
923 if (!l2cache_pmu->pmu_cluster) in l2_cache_pmu_probe()
926 l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1; in l2_cache_pmu_probe()
927 l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) | in l2_cache_pmu_probe()
930 cpumask_clear(&l2cache_pmu->cpumask); in l2_cache_pmu_probe()
933 err = device_for_each_child(&pdev->dev, l2cache_pmu, in l2_cache_pmu_probe()
938 if (l2cache_pmu->num_pmus == 0) { in l2_cache_pmu_probe()
944 &l2cache_pmu->node); in l2_cache_pmu_probe()
950 err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1); in l2_cache_pmu_probe()
957 l2cache_pmu->num_pmus); in l2_cache_pmu_probe()
963 &l2cache_pmu->node); in l2_cache_pmu_probe()
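
Read together, the probe lines above follow the usual uncore-PMU bring-up order: allocate the wrapper, stash it as drvdata, fill in the struct pmu, size the counter bookkeeping from get_num_counters(), allocate the per-CPU cluster slots, enumerate child cluster devices, register with the hotplug state machine, and only then call perf_pmu_register(). A condensed sketch of that ordering (callback wiring abbreviated; CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE is assumed to be this driver's hotplug state, and the PMU name is assumed):

static int l2_cache_pmu_probe(struct platform_device *pdev)
{
        struct l2cache_pmu *l2cache_pmu;
        int err;

        l2cache_pmu = devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu),
                                   GFP_KERNEL);
        if (!l2cache_pmu)
                return -ENOMEM;

        INIT_LIST_HEAD(&l2cache_pmu->clusters);
        platform_set_drvdata(pdev, l2cache_pmu);
        l2cache_pmu->pmu = (struct pmu) {
                .name           = "l2cache_0",          /* name assumed */
                .task_ctx_nr    = perf_invalid_context,
                .event_init     = l2_cache_event_init,
                /* ... add/del/start/stop/read and attr_groups ... */
        };

        l2cache_pmu->num_counters = get_num_counters();
        l2cache_pmu->pdev = pdev;
        l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
                                                     struct cluster_pmu *);
        if (!l2cache_pmu->pmu_cluster)
                return -ENOMEM;

        /* ... derive l2_cycle_ctr_idx and l2_counter_present_mask ... */
        cpumask_clear(&l2cache_pmu->cpumask);

        err = device_for_each_child(&pdev->dev, l2cache_pmu,
                                    l2_cache_pmu_probe_cluster);
        if (err)
                return err;
        if (l2cache_pmu->num_pmus == 0)
                return -ENODEV;

        err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                       &l2cache_pmu->node);
        if (err)
                return err;

        err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
        if (err)
                cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                            &l2cache_pmu->node);
        return err;
}
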
969 struct l2cache_pmu *l2cache_pmu = in l2_cache_pmu_remove() local
972 perf_pmu_unregister(&l2cache_pmu->pmu); in l2_cache_pmu_remove()
974 &l2cache_pmu->node); in l2_cache_pmu_remove()
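
Removal unwinds the tail of probe in reverse: unregister the PMU first so no new events arrive, then drop the hotplug instance (same assumed state constant as above):

static int l2_cache_pmu_remove(struct platform_device *pdev)
{
        struct l2cache_pmu *l2cache_pmu = platform_get_drvdata(pdev);

        perf_pmu_unregister(&l2cache_pmu->pmu);
        cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                    &l2cache_pmu->node);
        return 0;
}
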