Lines matching 'event' (Freescale Embedded Performance Monitor PMU driver)
3 * Performance event support - Freescale Embedded Performance Monitor
23 struct perf_event *event[MAX_HWEVENTS];
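
The hit on "struct perf_event *event[MAX_HWEVENTS]" is the heart of the per-CPU bookkeeping: each CPU keeps a table mapping hardware counter slots to the perf_event that owns them. A minimal sketch of that shape, assuming a value for MAX_HWEVENTS and an n_events field, neither of which is confirmed by the hits above:

struct perf_event;                              /* opaque in this sketch */

#define MAX_HWEVENTS 6                          /* assumed counter total */

struct cpu_hw_events {
	int n_events;                           /* slots currently in use */
	struct perf_event *event[MAX_HWEVENTS]; /* owner of each counter slot */
};
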
162 static void fsl_emb_pmu_read(struct perf_event *event)
166 if (event->hw.state & PERF_HES_STOPPED)
175 prev = local64_read(&event->hw.prev_count);
177 val = read_pmc(event->hw.idx);
178 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
182 local64_add(delta, &event->count);
183 local64_sub(delta, &event->hw.period_left);
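
The read path (file lines 162-183) folds the live hardware count into the 64-bit software count without taking a lock: it snapshots prev_count, reads the counter, and retries until the compare-and-swap proves no concurrent updater raced it. Below is a user-space model of that loop, a sketch only: local64_* is replaced with C11 atomics, read_pmc() is stubbed, and the real function additionally returns early when the event carries PERF_HES_STOPPED (file line 166).

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stub for the hardware read; the real read_pmc() reads a 32-bit
 * performance-monitor counter register. */
static uint32_t read_pmc(int idx) { (void)idx; return 100; }

static _Atomic uint64_t prev_count;   /* models event->hw.prev_count  */
static _Atomic int64_t  count;        /* models event->count          */
static _Atomic int64_t  period_left;  /* models event->hw.period_left */

static void pmu_read(int idx)
{
	uint64_t prev, val;
	int64_t delta;

	/* Re-read until prev_count can be swapped for the fresh value,
	 * so a racing updater (e.g. the overflow interrupt) can never
	 * make us account the same ticks twice. */
	do {
		prev = atomic_load(&prev_count);
		val = read_pmc(idx);
	} while (!atomic_compare_exchange_strong(&prev_count, &prev, val));

	/* The counter is 32 bits wide, so take the delta modulo 2^32. */
	delta = (int64_t)((val - prev) & 0xffffffffu);
	atomic_fetch_add(&count, delta);
	atomic_fetch_sub(&period_left, delta);
}

int main(void)
{
	pmu_read(0);
	printf("count=%lld left=%lld\n",
	       (long long)atomic_load(&count),
	       (long long)atomic_load(&period_left));
	return 0;
}

Whichever side wins the compare-and-swap is the side that accounts the delta; the loser simply retries against the new prev_count, which is why the count and period_left updates stay consistent without locking.
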
255 struct perf_event *event;
263 for_each_sibling_event(event, group) {
264 if (!is_software_event(event) &&
265 event->state != PERF_EVENT_STATE_OFF) {
268 ctrs[n] = event;
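
collect_events() (file lines 255-268) walks the group leader's siblings and gathers every hardware event that will need a counter; if the group wants more counters than exist, scheduling the whole group fails. A simplified stand-alone model, with the kernel's sibling-list iterator and state enum reduced to plain C (all types here are assumptions):

enum ev_state { EV_OFF, EV_INACTIVE, EV_ACTIVE };

struct gevent {
	int software;                 /* models is_software_event() */
	enum ev_state state;
	struct gevent *next_sibling;  /* stand-in for the sibling list */
};

static int collect_events(struct gevent *group, int max_count,
			  struct gevent *ctrs[])
{
	int n = 0;
	struct gevent *ev;

	/* The leader itself needs a slot if it is a hardware event. */
	if (!group->software) {
		if (n >= max_count)
			return -1;
		ctrs[n++] = group;
	}
	/* So does every hardware sibling that is not switched off. */
	for (ev = group->next_sibling; ev; ev = ev->next_sibling) {
		if (!ev->software && ev->state != EV_OFF) {
			if (n >= max_count)
				return -1;  /* group cannot fit */
			ctrs[n++] = ev;
		}
	}
	return n;
}
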
276 static int fsl_emb_pmu_add(struct perf_event *event, int flags)
284 perf_pmu_disable(event->pmu);
287 if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
295 if (cpuhw->event[i])
304 event->hw.idx = i;
305 cpuhw->event[i] = event;
309 if (event->hw.sample_period) {
310 s64 left = local64_read(&event->hw.period_left);
314 local64_set(&event->hw.prev_count, val);
317 event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
320 event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE);
324 perf_event_update_userpage(event);
326 write_pmlcb(i, event->hw.config >> 32);
327 write_pmlca(i, event->hw.config_base);
332 perf_pmu_enable(event->pmu);
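
fsl_emb_pmu_add() (file lines 276-332) claims a free counter slot and, for a sampling event, pre-loads the counter with 0x80000000 - left so that the overflow interrupt fires exactly when the period expires. A sketch of that allocation and arming logic; NUM_COUNTERS, N_RESTRICTED, the struct layout, and the stubs are all assumptions, not values from the file:

#include <stdint.h>

#define NUM_COUNTERS 6    /* assumed total counters            */
#define N_RESTRICTED 2    /* assumed restricted-capable subset */

struct hw_event {
	int restricted;          /* models FSL_EMB_EVENT_RESTRICTED */
	int idx;                 /* assigned counter, -1 if none    */
	int stopped;             /* models PERF_HES_STOPPED         */
	int64_t count;           /* models event->count             */
	uint64_t sample_period;
	uint64_t last_period;
	int64_t period_left;
	uint64_t prev_count;
};

static struct hw_event *slot[NUM_COUNTERS];  /* models cpuhw->event[] */

static void write_pmc(int idx, uint32_t val) { (void)idx; (void)val; }

static int pmu_add(struct hw_event *ev)
{
	int i, limit = NUM_COUNTERS;

	/* Restricted events fit only in the first N_RESTRICTED slots. */
	if (ev->restricted)
		limit = N_RESTRICTED;

	/* Scan top-down so the low, restricted-capable counters stay
	 * free as long as possible for the events that need them. */
	for (i = limit - 1; i >= 0; i--)
		if (!slot[i])
			break;
	if (i < 0)
		return -1;                    /* no free counter */

	ev->idx = i;
	slot[i] = ev;

	if (ev->sample_period) {
		int64_t left = ev->period_left;
		uint32_t val = 0;

		/* Arm the counter to overflow after "left" ticks. */
		if (left < 0x80000000LL)
			val = (uint32_t)(0x80000000LL - left);
		ev->prev_count = val;
		write_pmc(i, val);
	}
	return 0;
}

The direction of the scan is the interesting design choice: non-restricted events drift toward high-numbered counters, keeping the restricted-capable low slots open, which is also the subject of the TODO visible at file lines 360-363.
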
337 static void fsl_emb_pmu_del(struct perf_event *event, int flags)
340 int i = event->hw.idx;
342 perf_pmu_disable(event->pmu);
346 fsl_emb_pmu_read(event);
350 WARN_ON(event != cpuhw->event[event->hw.idx]);
356 cpuhw->event[i] = NULL;
357 event->hw.idx = -1;
360 * TODO: if at least one restricted event exists, and we
363 * a non-restricted event, migrate that event to the
370 perf_pmu_enable(event->pmu);
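
fsl_emb_pmu_del() is the mirror image: read the event one last time so no ticks are lost, then release the slot. Continuing the pmu_add() sketch's types; pmu_read_final() is a hypothetical stand-in for the final fsl_emb_pmu_read() call at file line 346:

#include <assert.h>

static void pmu_read_final(struct hw_event *ev) { (void)ev; /* fold ticks */ }

static void pmu_del(struct hw_event *ev)
{
	int i = ev->idx;

	/* Mirrors the listing's WARN_ON: the slot we believe we own
	 * must point back at this event. */
	assert(slot[i] == ev);

	pmu_read_final(ev);   /* bank the final count before release */
	slot[i] = NULL;
	ev->idx = -1;
	/* The file's TODO applies at this point: freeing a
	 * restricted-capable slot could be the moment to migrate a
	 * non-restricted event into it. */
}
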
374 static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
380 if (event->hw.idx < 0 || !event->hw.sample_period)
383 if (!(event->hw.state & PERF_HES_STOPPED))
387 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
390 perf_pmu_disable(event->pmu);
392 event->hw.state = 0;
393 left = local64_read(&event->hw.period_left);
397 write_pmc(event->hw.idx, val);
399 perf_event_update_userpage(event);
400 perf_pmu_enable(event->pmu);
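
fsl_emb_pmu_start() (file lines 374-400) re-arms a stopped sampling event: it clears the stopped state and reloads the counter from whatever period_left remains. Continuing the sketch's types, with the single stopped flag standing in for the PERF_HES_STOPPED | PERF_HES_UPTODATE pair:

static void pmu_start(struct hw_event *ev)
{
	int64_t left;
	uint32_t val = 0;

	/* Only placed, sampling events can be restarted. */
	if (ev->idx < 0 || !ev->sample_period)
		return;
	if (!ev->stopped)
		return;               /* already running */

	ev->stopped = 0;
	left = ev->period_left;
	if (left < 0x80000000LL)
		val = (uint32_t)(0x80000000LL - left);  /* resume mid-period */
	write_pmc(ev->idx, val);
}
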
404 static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
408 if (event->hw.idx < 0 || !event->hw.sample_period)
411 if (event->hw.state & PERF_HES_STOPPED)
415 perf_pmu_disable(event->pmu);
417 fsl_emb_pmu_read(event);
418 event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
419 write_pmc(event->hw.idx, 0);
421 perf_event_update_userpage(event);
422 perf_pmu_enable(event->pmu);
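
fsl_emb_pmu_stop() does the inverse: bank the ticks counted so far, mark the event stopped and up to date, and zero the hardware counter so it stays quiet. Again a sketch on the same assumed types:

static void pmu_stop(struct hw_event *ev)
{
	if (ev->idx < 0 || !ev->sample_period)
		return;
	if (ev->stopped)
		return;               /* already quiescent */

	pmu_read_final(ev);           /* bank the ticks counted so far */
	ev->stopped = 1;
	write_pmc(ev->idx, 0);        /* silence the counter */
}
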
429 static void hw_perf_event_destroy(struct perf_event *event)
469 static int fsl_emb_pmu_event_init(struct perf_event *event)
484 switch (event->attr.type) {
486 ev = event->attr.config;
493 err = hw_perf_cache_event(event->attr.config, &ev);
499 ev = event->attr.config;
506 event->hw.config = ppmu->xlate_event(ev);
507 if (!(event->hw.config & FSL_EMB_EVENT_VALID))
512 * other hardware events in the group. We assume the event
516 if (event->group_leader != event) {
517 n = collect_events(event->group_leader,
523 if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
534 event->hw.idx = -1;
536 event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
539 if (event->attr.exclude_user)
540 event->hw.config_base |= PMLCA_FCU;
541 if (event->attr.exclude_kernel)
542 event->hw.config_base |= PMLCA_FCS;
543 if (event->attr.exclude_idle)
546 event->hw.last_period = event->hw.sample_period;
547 local64_set(&event->hw.period_left, event->hw.last_period);
568 event->destroy = hw_perf_event_destroy;
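
fsl_emb_pmu_event_init() (file lines 469-568) translates the generic attr into hardware terms: the raw or cache config is passed through ppmu->xlate_event(), and the PMLCA control word is assembled from enable bits plus a freeze bit for each exclude_* request (the continuation of file line 536, not captured by this listing, merges the event-select field into the same word). The hit at file line 543 shows exclude_idle being tested; this PMU has no idle-freeze bit, so the sketch rejects it. The PMLCA_* values below are placeholders, not the real register masks:

#include <errno.h>

#define PMLCA_CE   (1u << 26)  /* placeholder: overflow-condition enable */
#define PMLCA_FCM1 (1u << 27)  /* placeholder: freeze-counter mode bit   */
#define PMLCA_FCU  (1u << 28)  /* placeholder: freeze in user mode       */
#define PMLCA_FCS  (1u << 29)  /* placeholder: freeze in supervisor mode */

static int build_config_base(int exclude_user, int exclude_kernel,
			     int exclude_idle, unsigned int *base)
{
	*base = PMLCA_CE | PMLCA_FCM1;

	if (exclude_user)
		*base |= PMLCA_FCU;    /* freeze, i.e. don't count, in user mode */
	if (exclude_kernel)
		*base |= PMLCA_FCS;    /* don't count in supervisor mode */
	if (exclude_idle)
		return -EOPNOTSUPP;    /* kernel-side this is -ENOTSUPP */

	return 0;
}

After this, last_period is seeded from sample_period and period_left is initialised to it (file lines 546-547), so the first overflow arrives one full period after the event starts counting.
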
589 static void record_and_restart(struct perf_event *event, unsigned long val,
592 u64 period = event->hw.sample_period;
593 const u64 last_period = event->hw.last_period;
597 if (event->hw.state & PERF_HES_STOPPED) {
598 write_pmc(event->hw.idx, 0);
603 prev = local64_read(&event->hw.prev_count);
605 local64_add(delta, &event->count);
608 * See if the total period for this event has expired,
612 left = local64_read(&event->hw.period_left) - delta;
619 event->hw.last_period = event->hw.sample_period;
625 write_pmc(event->hw.idx, val);
626 local64_set(&event->hw.prev_count, val);
627 local64_set(&event->hw.period_left, left);
628 perf_event_update_userpage(event);
638 perf_event_overflow(event, &data, regs);
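
record_and_restart() (file lines 589-638) is where sampling happens: the overflow delta is folded into the count, period_left is recomputed, and once the period has expired the counter is re-armed for the next period before the sample is delivered. A model continuing the sketch's types; deliver_sample() is a hypothetical stand-in for perf_event_overflow():

static void deliver_sample(struct hw_event *ev) { (void)ev; }

static void record_and_restart(struct hw_event *ev, uint32_t val)
{
	int64_t delta, left;
	uint32_t newval = 0;
	int record = 0;

	if (ev->stopped) {
		write_pmc(ev->idx, 0);  /* stopped event: just quiesce */
		return;
	}

	/* Fold the overflow delta into the 64-bit count, modulo 2^32. */
	delta = (int64_t)((val - ev->prev_count) & 0xffffffffu);
	ev->count += delta;

	left = ev->period_left - delta;
	if (ev->sample_period) {
		if (left <= 0) {
			/* Period expired.  Adding the period instead of
			 * resetting preserves any overshoot, keeping the
			 * long-run sampling rate accurate. */
			left += ev->sample_period;
			if (left <= 0)
				left = ev->sample_period;
			record = 1;
			ev->last_period = ev->sample_period;
		}
		if (left < 0x80000000LL)
			newval = (uint32_t)(0x80000000LL - left);  /* re-arm */
	}

	write_pmc(ev->idx, newval);
	ev->prev_count = newval;
	ev->period_left = left;

	if (record)
		deliver_sample(ev);
}

The last_period snapshot at file line 593 serves the delivered sample: it reports the period that was actually in force when the counter overflowed, even though sample_period may already have been updated for the next round.
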
646 struct perf_event *event;
650 event = cpuhw->event[i];
654 if (event) {
655 /* event has overflowed */
656 record_and_restart(event, val, regs);
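
Finally, the overflow interrupt (file lines 646-656) scans every counter. Because a sampling counter was armed at 0x80000000 - left, its top bit being set is the cheap signal that the period has elapsed. Continuing the sketches above (slot[], write_pmc(), record_and_restart()), with read_pmc() stubbed as in the read model and a small main() to tie the chain together:

static uint32_t read_pmc(int idx) { (void)idx; return 0; /* stub */ }

static void pmu_interrupt(void)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		struct hw_event *ev = slot[i];
		uint32_t val = read_pmc(i);

		if (val & 0x80000000u) {         /* top bit set: overflowed */
			if (ev) {
				/* event has overflowed */
				record_and_restart(ev, val);
			} else {
				/* Unowned counter gone negative: reset
				 * it so it stops raising interrupts. */
				write_pmc(i, 0);
			}
		}
	}
}

int main(void)
{
	struct hw_event ev = { .idx = -1, .sample_period = 1000,
			       .period_left = 1000 };

	if (pmu_add(&ev) == 0) {
		pmu_interrupt();
		pmu_del(&ev);
	}
	return 0;
}

The real handler also threads the interrupted pt_regs down through record_and_restart() into perf_event_overflow(), which is how the recorded sample attributes the overflow to the instruction that was executing.
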