Lines Matching refs:pmu_ctx — cross-reference listing: each entry gives the source line number, a one-line code excerpt referencing `pmu_ctx`, and the enclosing function name.

809 static bool perf_skip_pmu_ctx(struct perf_event_pmu_context *pmu_ctx,  in perf_skip_pmu_ctx()  argument
812 if ((event_type & EVENT_CGROUP) && !pmu_ctx->nr_cgroups) in perf_skip_pmu_ctx()
815 !(pmu_ctx->pmu->capabilities & PERF_PMU_CAP_MEDIATED_VPMU)) in perf_skip_pmu_ctx()
831 struct perf_event_pmu_context *pmu_ctx; in perf_ctx_disable() local
833 for_each_epc(pmu_ctx, ctx, NULL, event_type) in perf_ctx_disable()
834 perf_pmu_disable(pmu_ctx->pmu); in perf_ctx_disable()
840 struct perf_event_pmu_context *pmu_ctx; in perf_ctx_enable() local
842 for_each_epc(pmu_ctx, ctx, NULL, event_type) in perf_ctx_enable()
843 perf_pmu_enable(pmu_ctx->pmu); in perf_ctx_enable()
1171 event->pmu_ctx->nr_cgroups++; in perf_cgroup_event_enable()
1193 event->pmu_ctx->nr_cgroups--; in perf_cgroup_event_disable()
1782 if (left_pmu < right->pmu_ctx->pmu) in perf_event_groups_cmp()
1784 if (left_pmu > right->pmu_ctx->pmu) in perf_event_groups_cmp()
1830 return perf_event_groups_cmp(e->cpu, e->pmu_ctx->pmu, event_cgroup(e), in __group_less()
2008 event->pmu_ctx->nr_events++; in list_add_event()
2236 event->pmu_ctx->nr_events--; in list_del_event()
2341 return event->attr.pinned ? &event->pmu_ctx->pinned_active : in get_event_list()
2342 &event->pmu_ctx->flexible_active; in get_event_list()
2454 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_out()
2506 perf_assert_pmu_disabled(group_event->pmu_ctx->pmu); in group_sched_out()
2577 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; in __perf_remove_from_context() local
2607 if (!pmu_ctx->nr_events) { in __perf_remove_from_context()
2608 pmu_ctx->rotate_necessary = 0; in __perf_remove_from_context()
2611 struct perf_cpu_pmu_context *cpc = this_cpc(pmu_ctx->pmu); in __perf_remove_from_context()
2613 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx); in __perf_remove_from_context()
2683 perf_pmu_disable(event->pmu_ctx->pmu); in __perf_event_disable()
2699 perf_pmu_enable(event->pmu_ctx->pmu); in __perf_event_disable()
2804 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_in()
2862 struct pmu *pmu = group_event->pmu_ctx->pmu; in group_sched_in()
2909 struct perf_event_pmu_context *epc = event->pmu_ctx; in group_can_go_on()
3099 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, in __perf_install_in_context()
3265 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, get_event_type(event)); in __perf_event_enable()
3526 static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx, in __pmu_ctx_sched_out() argument
3529 struct perf_event_context *ctx = pmu_ctx->ctx; in __pmu_ctx_sched_out()
3531 struct pmu *pmu = pmu_ctx->pmu; in __pmu_ctx_sched_out()
3536 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx); in __pmu_ctx_sched_out()
3546 &pmu_ctx->pinned_active, in __pmu_ctx_sched_out()
3553 &pmu_ctx->flexible_active, in __pmu_ctx_sched_out()
3561 pmu_ctx->rotate_necessary = 0; in __pmu_ctx_sched_out()
3580 struct perf_event_pmu_context *pmu_ctx; in ctx_sched_out() local
3645 for_each_epc(pmu_ctx, ctx, pmu, event_type) in ctx_sched_out()
3646 __pmu_ctx_sched_out(pmu_ctx, is_active); in ctx_sched_out()
3753 struct perf_event_pmu_context *pmu_ctx; in perf_ctx_sched_task_cb() local
3756 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { in perf_ctx_sched_task_cb()
3757 cpc = this_cpc(pmu_ctx->pmu); in perf_ctx_sched_task_cb()
3759 if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task) in perf_ctx_sched_task_cb()
3760 pmu_ctx->pmu->sched_task(pmu_ctx, task, sched_in); in perf_ctx_sched_task_cb()
3988 static void __link_epc(struct perf_event_pmu_context *pmu_ctx) in __link_epc() argument
3992 if (!pmu_ctx->ctx->task) in __link_epc()
3995 cpc = this_cpc(pmu_ctx->pmu); in __link_epc()
3996 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx); in __link_epc()
3997 cpc->task_epc = pmu_ctx; in __link_epc()
4052 __link_epc((*evt)->pmu_ctx); in visit_groups_merge()
4053 perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu); in visit_groups_merge()
4123 event->pmu_ctx->pmu->capabilities & PERF_PMU_CAP_MEDIATED_VPMU && in merge_sched_in()
4144 struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu); in merge_sched_in()
4146 event->pmu_ctx->rotate_necessary = 1; in merge_sched_in()
4168 static void __pmu_ctx_sched_in(struct perf_event_pmu_context *pmu_ctx, in __pmu_ctx_sched_in() argument
4171 struct perf_event_context *ctx = pmu_ctx->ctx; in __pmu_ctx_sched_in()
4174 pmu_groups_sched_in(ctx, &ctx->pinned_groups, pmu_ctx->pmu, event_type); in __pmu_ctx_sched_in()
4176 pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu_ctx->pmu, event_type); in __pmu_ctx_sched_in()
4184 struct perf_event_pmu_context *pmu_ctx; in ctx_sched_in() local
4236 for_each_epc(pmu_ctx, ctx, pmu, event_type) in ctx_sched_in()
4237 __pmu_ctx_sched_in(pmu_ctx, EVENT_PINNED | (event_type & EVENT_GUEST)); in ctx_sched_in()
4242 for_each_epc(pmu_ctx, ctx, pmu, event_type) in ctx_sched_in()
4243 __pmu_ctx_sched_in(pmu_ctx, EVENT_FLEXIBLE | (event_type & EVENT_GUEST)); in ctx_sched_in()
4491 struct perf_event_pmu_context *pmu_ctx; in perf_adjust_freq_unthr_context() local
4503 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { in perf_adjust_freq_unthr_context()
4504 if (!(pmu_ctx->nr_freq || unthrottle)) in perf_adjust_freq_unthr_context()
4506 if (!perf_pmu_ctx_is_active(pmu_ctx)) in perf_adjust_freq_unthr_context()
4508 if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) in perf_adjust_freq_unthr_context()
4511 perf_pmu_disable(pmu_ctx->pmu); in perf_adjust_freq_unthr_context()
4512 perf_adjust_freq_unthr_events(&pmu_ctx->pinned_active); in perf_adjust_freq_unthr_context()
4513 perf_adjust_freq_unthr_events(&pmu_ctx->flexible_active); in perf_adjust_freq_unthr_context()
4514 perf_pmu_enable(pmu_ctx->pmu); in perf_adjust_freq_unthr_context()
4538 ctx_event_to_rotate(struct perf_event_pmu_context *pmu_ctx) in ctx_event_to_rotate() argument
4544 .pmu = pmu_ctx->pmu, in ctx_event_to_rotate()
4548 event = list_first_entry_or_null(&pmu_ctx->flexible_active, in ctx_event_to_rotate()
4554 tree = &pmu_ctx->ctx->flexible_groups.tree; in ctx_event_to_rotate()
4556 if (!pmu_ctx->ctx->task) { in ctx_event_to_rotate()
4582 pmu_ctx->rotate_necessary = 0; in ctx_event_to_rotate()
4844 pmu = event->pmu_ctx->pmu; in __perf_event_read()
5775 if (event->pmu_ctx) { in __free_event()
5782 WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx); in __free_event()
5783 put_pmu_ctx(event->pmu_ctx); in __free_event()
12887 if (event->pmu_ctx) { in __pmu_detach_event()
12888 put_pmu_ctx(event->pmu_ctx); in __pmu_detach_event()
12889 event->pmu_ctx = NULL; in __pmu_detach_event()
13810 struct perf_event_pmu_context *pmu_ctx; in SYSCALL_DEFINE5() local
14047 pmu = group_leader->pmu_ctx->pmu; in SYSCALL_DEFINE5()
14061 group_leader->pmu_ctx->pmu != pmu) in SYSCALL_DEFINE5()
14069 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in SYSCALL_DEFINE5()
14070 if (IS_ERR(pmu_ctx)) { in SYSCALL_DEFINE5()
14071 err = PTR_ERR(pmu_ctx); in SYSCALL_DEFINE5()
14074 event->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
14117 put_pmu_ctx(group_leader->pmu_ctx); in SYSCALL_DEFINE5()
14121 put_pmu_ctx(sibling->pmu_ctx); in SYSCALL_DEFINE5()
14135 sibling->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
14136 get_pmu_ctx(pmu_ctx); in SYSCALL_DEFINE5()
14146 group_leader->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
14147 get_pmu_ctx(pmu_ctx); in SYSCALL_DEFINE5()
14187 put_pmu_ctx(event->pmu_ctx); in SYSCALL_DEFINE5()
14188 event->pmu_ctx = NULL; /* _free_event() */ in SYSCALL_DEFINE5()
14221 struct perf_event_pmu_context *pmu_ctx; in perf_event_create_kernel_counter() local
14269 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in perf_event_create_kernel_counter()
14270 if (IS_ERR(pmu_ctx)) { in perf_event_create_kernel_counter()
14271 err = PTR_ERR(pmu_ctx); in perf_event_create_kernel_counter()
14274 event->pmu_ctx = pmu_ctx; in perf_event_create_kernel_counter()
14303 put_pmu_ctx(pmu_ctx); in perf_event_create_kernel_counter()
14304 event->pmu_ctx = NULL; /* _free_event() */ in perf_event_create_kernel_counter()
14325 put_pmu_ctx(event->pmu_ctx); in __perf_pmu_remove()
14330 put_pmu_ctx(sibling->pmu_ctx); in __perf_pmu_remove()
14347 event->pmu_ctx = epc; in __perf_pmu_install_event()
14712 struct perf_event_pmu_context *pmu_ctx; in inherit_event() local
14744 pmu_ctx = find_get_pmu_context(parent_event->pmu_ctx->pmu, child_ctx, child_event); in inherit_event()
14745 if (IS_ERR(pmu_ctx)) { in inherit_event()
14747 return ERR_CAST(pmu_ctx); in inherit_event()
14749 child_event->pmu_ctx = pmu_ctx; in inherit_event()