Lines Matching refs:perf_event
80 if (pmc->perf_event) {
81 perf_event_disable(pmc->perf_event);
82 perf_event_release_kernel(pmc->perf_event);
83 pmc->perf_event = NULL;
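
Lines 80-83 are the teardown path: the event is disabled before it is released, and the pointer is cleared so every later pmc->perf_event check (lines 248, 537, 595, 614) sees the counter as gone. A minimal sketch of that pattern, with an illustrative wrapper name:

    /* Illustrative wrapper around the teardown at lines 80-83: stop
     * counting first, free the kernel counter, then clear the stale
     * pointer so subsequent NULL checks stay honest. */
    static void pmc_release_event(struct kvm_pmc *pmc)
    {
            if (!pmc->perf_event)
                    return;

            perf_event_disable(pmc->perf_event);
            perf_event_release_kernel(pmc->perf_event);
            pmc->perf_event = NULL;
    }
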
248 } else if (pmc->perf_event) {
249 pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running);
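
Lines 248-249 fold the live hardware count into the guest-visible counter. perf_event_read_value() returns the event total and reports, through its out-parameters, how long the event was enabled versus actually running on hardware (the two differ when counters are multiplexed); only the returned total feeds counter_val. A minimal sketch of the accumulate step, with illustrative locals:

    u64 enabled, running;

    /* enabled/running are required out-parameters of
     * perf_event_read_value(); only the returned total is accumulated. */
    if (pmc->perf_event)
            pmc->counter_val += perf_event_read_value(pmc->perf_event,
                                                      &enabled, &running);
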
268 static void kvm_riscv_pmu_overflow(struct perf_event *perf_event,
272 struct kvm_pmc *pmc = perf_event->overflow_handler_context;
275 struct riscv_pmu *rpmu = to_riscv_pmu(perf_event->pmu);
279 * Stop the event counting by directly accessing the perf_event.
285 rpmu->pmu.stop(perf_event, PERF_EF_UPDATE);
297 period = -(local64_read(&perf_event->count));
299 local64_set(&perf_event->hw.period_left, 0);
300 perf_event->attr.sample_period = period;
301 perf_event->hw.sample_period = period;
306 rpmu->pmu.start(perf_event, PERF_EF_RELOAD);
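
Lines 285-306 show the overflow handler's reprogramming sequence: stop the event with PERF_EF_UPDATE so its count is current, compute the next sample period, then restart with PERF_EF_RELOAD. The period math at line 297 is the interesting part: negating the 64-bit count in unsigned arithmetic yields 2^64 - count, i.e. the number of ticks left until the counter wraps again, which is exactly the period the guest expects. A standalone sketch of that arithmetic (plain C; the kernel reads the count through local64_read()):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Example counter value just short of the 64-bit wrap point. */
            uint64_t count = 0xffffffffffff0000ULL;

            /* Unsigned negation is defined modulo 2^64, so -count is the
             * distance to the next wrap: 2^64 - count. */
            uint64_t period = -count;

            printf("ticks until next wrap: %llu\n",
                   (unsigned long long)period); /* prints 65536 */
            return 0;
    }
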
313 struct perf_event *event;
334 pmc->perf_event = event;
336 perf_event_enable(pmc->perf_event);
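
Lines 313-336 are the creation path. A hedged sketch of how such a counter is typically wired up: perf_event_create_kernel_counter() takes the attr, a CPU (-1 for any), a task to follow, the overflow callback from line 268, and a context pointer that comes back as overflow_handler_context (line 272). The wrapper name and abbreviated error handling below are illustrative, not from the listing:

    static long pmc_create_event(struct kvm_pmc *pmc,
                                 struct perf_event_attr *attr)
    {
            struct perf_event *event;

            /* cpu == -1: any CPU; current: count in this thread's
             * context; pmc round-trips to the handler as its context. */
            event = perf_event_create_kernel_counter(attr, -1, current,
                                                     kvm_riscv_pmu_overflow,
                                                     pmc);
            if (IS_ERR(event))
                    return PTR_ERR(event);

            pmc->perf_event = event;
            perf_event_enable(pmc->perf_event); /* line 336 */
            return 0;
    }
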
537 } else if (pmc->perf_event) {
542 perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc));
543 perf_event_enable(pmc->perf_event);
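
Lines 537-543 restart an already-created counter: the sample period is reprogrammed from the guest-visible counter value before the event is re-enabled, so the next overflow fires where the guest expects rather than where a stale period would put it. A sketch of that ordering, with an illustrative wrapper (kvm_pmu_get_sample_period() is the helper from line 542):

    static void pmc_start_event(struct kvm_pmc *pmc)
    {
            /* Mirrors lines 542-543: set the fresh sample period,
             * then let the event count. */
            perf_event_period(pmc->perf_event,
                              kvm_pmu_get_sample_period(pmc));
            perf_event_enable(pmc->perf_event);
    }
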
595 } else if (pmc->perf_event) {
598 perf_event_disable(pmc->perf_event);
614 else if (pmc->perf_event)
615 pmc->counter_val += perf_event_read_value(pmc->perf_event,
616 &enabled, &running);
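
Lines 595-616 are the stop path: the event is disabled first, then the final hardware delta is folded into the saved counter value. A sketch of that sequence, with an illustrative wrapper:

    static void pmc_stop_event(struct kvm_pmc *pmc)
    {
            u64 enabled, running;

            /* Mirrors lines 598 and 614-616: quiesce the event, then
             * snapshot its final count into the guest-visible value. */
            perf_event_disable(pmc->perf_event);
            pmc->counter_val += perf_event_read_value(pmc->perf_event,
                                                      &enabled, &running);
    }
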