Lines matching full:cpc (uses of the local variable cpc, a struct perf_cpu_pmu_context pointer, in kernel/events/core.c; the leading number on each line is the source line in that file)

485 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc);
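
Taken together, the accesses in this listing imply roughly the following layout for struct perf_cpu_pmu_context. This is a sketch reconstructed purely from the fields used below; the authoritative definition lives in include/linux/perf_event.h and carries more members:

/* Sketch only: fields inferred from the references in this listing. */
struct perf_cpu_pmu_context {
        struct perf_event_pmu_context   epc;            /* embedded per-CPU context for this PMU */
        struct perf_event_pmu_context   *task_epc;      /* task context currently on this CPU, if any */

        int                             active_oncpu;   /* hardware events currently scheduled in */
        int                             exclusive;      /* set while an exclusive event owns the PMU */

        raw_spinlock_t                  hrtimer_lock;   /* serializes the hrtimer state below */
        struct hrtimer                  hrtimer;        /* multiplexing (rotation) timer */
        ktime_t                         hrtimer_interval;
        unsigned int                    hrtimer_active;

        int                             sched_cb_usage; /* refcount for pmu->sched_task() callbacks */
        struct list_head                sched_cb_entry; /* link in the per-CPU sched_cb_list */
};
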
1115 struct perf_cpu_pmu_context *cpc; in perf_mux_hrtimer_handler() local
1120 cpc = container_of(hr, struct perf_cpu_pmu_context, hrtimer); in perf_mux_hrtimer_handler()
1121 rotations = perf_rotate_context(cpc); in perf_mux_hrtimer_handler()
1123 raw_spin_lock(&cpc->hrtimer_lock); in perf_mux_hrtimer_handler()
1125 hrtimer_forward_now(hr, cpc->hrtimer_interval); in perf_mux_hrtimer_handler()
1127 cpc->hrtimer_active = 0; in perf_mux_hrtimer_handler()
1128 raw_spin_unlock(&cpc->hrtimer_lock); in perf_mux_hrtimer_handler()
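
Filling in the lines the search skipped (1116-1119, 1122, 1124, 1126 and the function tail), the timer handler plausibly reads as follows. Note that it runs the actual rotation outside hrtimer_lock and only takes the lock around the decision to re-arm; treat this as a reconstruction, not a verbatim quote:

static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
        struct perf_cpu_pmu_context *cpc;
        bool rotations;

        /* hrtimer callbacks run with IRQs disabled, so a plain raw_spin_lock suffices */
        cpc = container_of(hr, struct perf_cpu_pmu_context, hrtimer);
        rotations = perf_rotate_context(cpc);

        raw_spin_lock(&cpc->hrtimer_lock);
        if (rotations)
                hrtimer_forward_now(hr, cpc->hrtimer_interval);
        else
                cpc->hrtimer_active = 0;        /* nothing to rotate; let the timer die */
        raw_spin_unlock(&cpc->hrtimer_lock);

        return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}
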
1133 static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu) in __perf_mux_hrtimer_init() argument
1135 struct hrtimer *timer = &cpc->hrtimer; in __perf_mux_hrtimer_init()
1136 struct pmu *pmu = cpc->epc.pmu; in __perf_mux_hrtimer_init()
1147 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); in __perf_mux_hrtimer_init()
1149 raw_spin_lock_init(&cpc->hrtimer_lock); in __perf_mux_hrtimer_init()
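
A sketch of the surrounding init function, assuming the usual core.c pattern of pulling the per-PMU interval with a sane floor; the exact fallback constant (PERF_CPU_HRTIMER) and the hrtimer mode are from memory, so treat them as approximate:

static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
{
        struct hrtimer *timer = &cpc->hrtimer;
        struct pmu *pmu = cpc->epc.pmu;
        int interval;

        /*
         * Check the per-PMU default is sane; if unset, fall back to the
         * global default and never go below 1ms.
         */
        interval = pmu->hrtimer_interval_ms;
        if (interval < 1)
                interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

        cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

        raw_spin_lock_init(&cpc->hrtimer_lock);
        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
        timer->function = perf_mux_hrtimer_handler;
}
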
1154 static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc) in perf_mux_hrtimer_restart() argument
1156 struct hrtimer *timer = &cpc->hrtimer; in perf_mux_hrtimer_restart()
1159 raw_spin_lock_irqsave(&cpc->hrtimer_lock, flags); in perf_mux_hrtimer_restart()
1160 if (!cpc->hrtimer_active) { in perf_mux_hrtimer_restart()
1161 cpc->hrtimer_active = 1; in perf_mux_hrtimer_restart()
1162 hrtimer_forward_now(timer, cpc->hrtimer_interval); in perf_mux_hrtimer_restart()
1165 raw_spin_unlock_irqrestore(&cpc->hrtimer_lock, flags); in perf_mux_hrtimer_restart()
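
The restart helper arms the timer only if it is not already pending, under hrtimer_lock so it cannot race with the handler's decision to let the timer die. Reconstructed around the listed lines; the hrtimer_start_expires() call is inferred:

static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc)
{
        struct hrtimer *timer = &cpc->hrtimer;
        unsigned long flags;

        raw_spin_lock_irqsave(&cpc->hrtimer_lock, flags);
        if (!cpc->hrtimer_active) {
                cpc->hrtimer_active = 1;
                hrtimer_forward_now(timer, cpc->hrtimer_interval);
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
        }
        raw_spin_unlock_irqrestore(&cpc->hrtimer_lock, flags);

        return 0;
}
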
2306 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context); in event_sched_out() local
2309 // XXX cpc serialization, probably per-cpu IRQ disabled in event_sched_out()
2338 cpc->active_oncpu--; in event_sched_out()
2343 if (event->attr.exclusive || !cpc->active_oncpu) in event_sched_out()
2344 cpc->exclusive = 0; in event_sched_out()
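
In context, the event_sched_out() fragment does the exclusive-PMU bookkeeping: only hardware events count toward active_oncpu, and the exclusive flag drops once the exclusive event leaves or the PMU runs empty. A hedged excerpt, with the surrounding state handling elided:

        /* excerpt from event_sched_out(), sketch */
        if (!is_software_event(event))
                cpc->active_oncpu--;
        /* ... frequency-event accounting elided ... */
        if (event->attr.exclusive || !cpc->active_oncpu)
                cpc->exclusive = 0;
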
2447 struct perf_cpu_pmu_context *cpc; in __perf_remove_from_context() local
2449 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context); in __perf_remove_from_context()
2450 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx); in __perf_remove_from_context()
2451 cpc->task_epc = NULL; in __perf_remove_from_context()
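
This is the detach half of the task_epc link: once the last event of a task context leaves this PMU on this CPU, the per-CPU back-pointer is cleared, with a WARN_ON_ONCE() guarding against a stale cross-link. The same assert-then-clear pattern recurs at line 3320 (__pmu_ctx_sched_out), and line 3806 (__link_epc) is the assert-then-set mirror. A sketch of the enclosing logic; the nr_events/is_active conditions are inferred from context:

        /* excerpt from __perf_remove_from_context(), sketch */
        if (!pmu_ctx->nr_events && pmu_ctx->ctx->task && pmu_ctx->ctx->is_active) {
                struct perf_cpu_pmu_context *cpc;

                cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
                WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
                cpc->task_epc = NULL;   /* no task context on this PMU/CPU anymore */
        }
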
2587 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context); in event_sched_in() local
2628 cpc->active_oncpu++; in event_sched_in()
2634 cpc->exclusive = 1; in event_sched_in()
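
event_sched_in() is the mirror image of the sched-out accounting above: hardware events bump active_oncpu, and scheduling an exclusive event marks the PMU taken:

        /* excerpt from event_sched_in(), sketch; surrounding lines elided */
        if (!is_software_event(event))
                cpc->active_oncpu++;
        /* ... */
        if (event->attr.exclusive)
                cpc->exclusive = 1;
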
2694 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context); in group_can_go_on() local
2705 if (cpc->exclusive) in group_can_go_on()
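
group_can_go_on() is where cpc->exclusive actually gates scheduling. A reconstruction of the whole predicate; the PERF_EV_CAP_SOFTWARE check and the get_event_list() helper are from memory:

static int group_can_go_on(struct perf_event *event, int can_add_hw)
{
        struct perf_event_pmu_context *epc = event->pmu_ctx;
        struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);

        /* groups of pure software events can always go on */
        if (event->group_caps & PERF_EV_CAP_SOFTWARE)
                return 1;
        /* if an exclusive group already owns the PMU, nothing else goes on */
        if (cpc->exclusive)
                return 0;
        /* an exclusive group cannot go on while other events are active */
        if (event->attr.exclusive && !list_empty(get_event_list(event)))
                return 0;
        /* otherwise defer to whether earlier groups fit on the hardware */
        return can_add_hw;
}
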
3317 struct perf_cpu_pmu_context *cpc; in __pmu_ctx_sched_out() local
3319 cpc = this_cpu_ptr(pmu->cpu_pmu_context); in __pmu_ctx_sched_out()
3320 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx); in __pmu_ctx_sched_out()
3321 cpc->task_epc = NULL; in __pmu_ctx_sched_out()
3564 struct perf_cpu_pmu_context *cpc; in perf_ctx_sched_task_cb() local
3567 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context); in perf_ctx_sched_task_cb()
3569 if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task) in perf_ctx_sched_task_cb()
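
perf_ctx_sched_task_cb() walks every PMU sub-context of an event context and forwards the switch notification, but only when that PMU both implements sched_task() and has asked for callbacks via the sched_cb_usage refcount. Reconstructed around the listed lines:

static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in)
{
        struct perf_event_pmu_context *pmu_ctx;
        struct perf_cpu_pmu_context *cpc;

        list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
                cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);

                if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
                        pmu_ctx->pmu->sched_task(pmu_ctx, sched_in);
        }
}
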
3676 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context); in perf_sched_cb_dec() local
3681 if (!--cpc->sched_cb_usage) in perf_sched_cb_dec()
3682 list_del(&cpc->sched_cb_entry); in perf_sched_cb_dec()
3688 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context); in perf_sched_cb_inc() local
3690 if (!cpc->sched_cb_usage++) in perf_sched_cb_inc()
3691 list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list)); in perf_sched_cb_inc()
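
The inc/dec pair maintains a per-CPU list of cpc's whose PMUs want context-switch callbacks; the cpc is linked on the 0->1 transition and unlinked on 1->0. A sketch; the global perf_sched_cb_usages fast-path counter and the barrier() placement follow the pattern used in core.c, but verify against your tree:

void perf_sched_cb_inc(struct pmu *pmu)
{
        struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);

        if (!cpc->sched_cb_usage++)     /* 0 -> 1: start receiving callbacks */
                list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));

        barrier();
        this_cpu_inc(perf_sched_cb_usages);
}

void perf_sched_cb_dec(struct pmu *pmu)
{
        struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);

        this_cpu_dec(perf_sched_cb_usages);
        barrier();

        if (!--cpc->sched_cb_usage)     /* 1 -> 0: stop receiving callbacks */
                list_del(&cpc->sched_cb_entry);
}
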
3705 static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in) in __perf_pmu_sched_task() argument
3710 pmu = cpc->epc.pmu; in __perf_pmu_sched_task()
3719 pmu->sched_task(cpc->task_epc, sched_in); in __perf_pmu_sched_task()
3730 struct perf_cpu_pmu_context *cpc; in perf_pmu_sched_task() local
3736 list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry) in perf_pmu_sched_task()
3737 __perf_pmu_sched_task(cpc, sched_in); in perf_pmu_sched_task()
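
At context-switch time the per-CPU list built above is walked and each registered PMU gets its callback, with the PMU disabled around the call. A reconstruction of both functions; the perf_ctx_lock()/perf_ctx_unlock() arguments are inferred:

static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in)
{
        struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
        struct pmu *pmu = cpc->epc.pmu;

        /* software PMUs never set sched_task, so they cannot be on this list */
        if (WARN_ON_ONCE(!pmu->sched_task))
                return;

        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
        perf_pmu_disable(pmu);

        pmu->sched_task(cpc->task_epc, sched_in);

        perf_pmu_enable(pmu);
        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}

static void perf_pmu_sched_task(struct task_struct *prev,
                                struct task_struct *next,
                                bool sched_in)
{
        struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
        struct perf_cpu_pmu_context *cpc;

        /* a live task_ctx is handled by the context sched-in/out paths instead */
        if (prev == next || cpuctx->task_ctx)
                return;

        list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry)
                __perf_pmu_sched_task(cpc, sched_in);
}
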
3800 struct perf_cpu_pmu_context *cpc; in __link_epc() local
3805 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context); in __link_epc()
3806 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx); in __link_epc()
3807 cpc->task_epc = pmu_ctx; in __link_epc()
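
__link_epc() is the set side of the task_epc pattern noted earlier. Reconstructed; the early return for CPU contexts is inferred from the fact that only task contexts get linked:

static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
{
        struct perf_cpu_pmu_context *cpc;

        if (!pmu_ctx->ctx->task)        /* only task contexts get linked */
                return;

        cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
        WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
        cpc->task_epc = pmu_ctx;
}
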
3934 struct perf_cpu_pmu_context *cpc; in merge_sched_in() local
3937 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context); in merge_sched_in()
3938 perf_mux_hrtimer_restart(cpc); in merge_sched_in()
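
This is where multiplexing gets started from the scheduling path: when a flexible group fails to fit on the hardware, the context is flagged for rotation and the mux timer is kicked. A sketch of the tail of merge_sched_in(); the pinned-vs-flexible split and can_add_hw (the int * passed through group iteration) are from memory:

        /* excerpt from merge_sched_in(), sketch */
        if (event->state == PERF_EVENT_STATE_INACTIVE) {
                *can_add_hw = 0;
                if (event->attr.pinned) {
                        /* pinned events must run or error out */
                        perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
                } else {
                        struct perf_cpu_pmu_context *cpc;

                        /* flexible event didn't fit: rotate via the mux hrtimer */
                        event->pmu_ctx->rotate_necessary = 1;
                        cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
                        perf_mux_hrtimer_restart(cpc);
                }
        }
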
4362 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc) in perf_rotate_context() argument
4375 cpu_epc = &cpc->epc; in perf_rotate_context()
4377 task_epc = cpc->task_epc; in perf_rotate_context()
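
perf_rotate_context() rotates both the per-CPU context embedded in the cpc and, if one is linked, the current task's context for the same PMU; its return value tells the hrtimer handler whether to re-arm. A sketch of the opening, with the rotation body elided:

static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
{
        struct perf_event_pmu_context *cpu_epc, *task_epc;
        int cpu_rotate, task_rotate;

        /*
         * Runs from hrtimer IRQ context, so nobody can install new events
         * under us and the rotate_necessary flags are stable.
         */
        cpu_epc = &cpc->epc;
        task_epc = cpc->task_epc;

        cpu_rotate = cpu_epc->rotate_necessary;
        task_rotate = task_epc ? task_epc->rotate_necessary : 0;

        if (!(cpu_rotate || task_rotate))
                return false;   /* handler lets the timer die */

        /* ... sched out the first flexible event, rotate the lists, sched in ... */

        return true;            /* handler re-arms the timer */
}
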
4962 struct perf_cpu_pmu_context *cpc; in find_get_pmu_context() local
4964 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu); in find_get_pmu_context()
4965 epc = &cpc->epc; in find_get_pmu_context()
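
For per-CPU events, find_get_pmu_context() does not allocate: it hands back the epc embedded in that CPU's cpc and refcounts it. A sketch of that branch; field names like refcount and embedded are from memory, so verify against your tree:

        /* excerpt from find_get_pmu_context(), CPU-event branch, sketch;
         * epc is the function's struct perf_event_pmu_context * return slot */
        if (!ctx->task) {
                struct perf_cpu_pmu_context *cpc;

                cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
                epc = &cpc->epc;

                raw_spin_lock_irq(&ctx->lock);
                if (!epc->ctx) {
                        /* first user: wire the embedded epc into the context */
                        atomic_set(&epc->refcount, 1);
                        epc->embedded = 1;
                        list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
                        epc->ctx = ctx;
                } else {
                        WARN_ON_ONCE(epc->ctx != ctx);
                        atomic_inc(&epc->refcount);
                }
                raw_spin_unlock_irq(&ctx->lock);

                return epc;
        }
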
11683 struct perf_cpu_pmu_context *cpc; in perf_event_mux_interval_ms_store() local
11684 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu); in perf_event_mux_interval_ms_store()
11685 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); in perf_event_mux_interval_ms_store()
11687 cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc); in perf_event_mux_interval_ms_store()
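
Writing the sysfs attribute updates every CPU's copy of the interval and then re-arms each timer on its own CPU via a cross-call, since the hrtimer is pinned. A sketch of the update loop plus the small IPI trampoline it calls; the cpus_read_lock() bracket is inferred, and timer here is the millisecond value parsed from the sysfs write:

static int perf_mux_hrtimer_restart_ipi(void *arg)
{
        return perf_mux_hrtimer_restart(arg);
}

        /* excerpt from perf_event_mux_interval_ms_store(), sketch */
        cpus_read_lock();
        for_each_online_cpu(cpu) {
                struct perf_cpu_pmu_context *cpc;

                cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
                cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);

                /* re-arm on the target CPU itself; the hrtimer is pinned */
                cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc);
        }
        cpus_read_unlock();
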
11896 struct perf_cpu_pmu_context *cpc; in perf_pmu_register() local
11898 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu); in perf_pmu_register()
11899 __perf_init_event_pmu_context(&cpc->epc, pmu); in perf_pmu_register()
11900 __perf_mux_hrtimer_init(cpc, cpu); in perf_pmu_register()
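
Registration is where the per-CPU cpc storage comes from: perf_pmu_register() allocates one perf_cpu_pmu_context per possible CPU for the new PMU, then initializes each one's embedded epc and mux hrtimer. A sketch; the allocation error path is abbreviated:

        /* excerpt from perf_pmu_register(), sketch */
        pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
        if (!pmu->cpu_pmu_context)
                goto free_dev;  /* unwind earlier allocations */

        for_each_possible_cpu(cpu) {
                struct perf_cpu_pmu_context *cpc;

                cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
                __perf_init_event_pmu_context(&cpc->epc, pmu);
                __perf_mux_hrtimer_init(cpc, cpu);
        }
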