Lines matching refs: cpuc

All hits below are in the Intel DS/PEBS code (these functions live in arch/x86/events/intel/ds.c); the leading number on each line is the line number in that file, and the trailing local/argument marker records how cpuc enters the function.

855 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_disable_bts() local
858 if (!cpuc->ds) in intel_pmu_disable_bts()
872 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_drain_bts_buffer() local
873 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_bts_buffer()
879 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; in intel_pmu_drain_bts_buffer()
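
For context, the BTS drain above walks the Debug Store area from bts_buffer_base to bts_index, where each BTS record is three 64-bit words: from, to, flags. A minimal user-space sketch of that walk, assuming that layout; the ds_stub type and drain_bts name are made-up stand-ins for the kernel-internal debug_store handling:

    #include <stdint.h>
    #include <stdio.h>

    /* A BTS record: source and destination of one taken branch. */
    struct bts_record {
        uint64_t from, to, flags;
    };

    /* Hypothetical mirror of the two debug_store fields the drain uses. */
    struct ds_stub {
        uint64_t bts_buffer_base;   /* start of the BTS area */
        uint64_t bts_index;         /* one past the last valid record */
    };

    static void drain_bts(struct ds_stub *ds)
    {
        struct bts_record *at  = (struct bts_record *)(uintptr_t)ds->bts_buffer_base;
        struct bts_record *top = (struct bts_record *)(uintptr_t)ds->bts_index;

        for (; at < top; at++)
            printf("branch %#llx -> %#llx\n",
                   (unsigned long long)at->from, (unsigned long long)at->to);

        ds->bts_index = ds->bts_buffer_base;    /* mark the buffer empty */
    }

    int main(void)
    {
        struct bts_record buf[2] = {
            { 0x401000, 0x401100, 0 },
            { 0x401100, 0x401000, 0 },
        };
        struct ds_stub ds = {
            .bts_buffer_base = (uintptr_t)&buf[0],
            .bts_index       = (uintptr_t)&buf[2],
        };
        drain_bts(&ds);
        return 0;
    }
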
1261 static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc) in pebs_needs_sched_cb() argument
1263 if (cpuc->n_pebs == cpuc->n_pebs_via_pt) in pebs_needs_sched_cb()
1266 return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs); in pebs_needs_sched_cb()
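
pebs_needs_sched_cb() is the policy knob for the context-switch hook: a drain on schedule-out is only needed when every PEBS event on this CPU uses the large (multi-record) buffer and the output is not all routed to Intel PT. A stand-alone model of the predicate; pebs_counts and needs_sched_cb are stand-ins for cpu_hw_events and the kernel function:

    #include <assert.h>
    #include <stdbool.h>

    struct pebs_counts {        /* stub for the relevant cpu_hw_events fields */
        int n_pebs;             /* all PEBS events on this CPU */
        int n_large_pebs;       /* events using the multi-record buffer */
        int n_pebs_via_pt;      /* events whose output goes to Intel PT */
    };

    static bool needs_sched_cb(const struct pebs_counts *c)
    {
        /* Everything goes to PT: nothing to drain at context switch. */
        if (c->n_pebs == c->n_pebs_via_pt)
            return false;
        /* Only large PEBS defers records, so only then is a drain needed. */
        return c->n_pebs && (c->n_pebs == c->n_large_pebs);
    }

    int main(void)
    {
        struct pebs_counts all_large = { .n_pebs = 2, .n_large_pebs = 2 };
        struct pebs_counts mixed     = { .n_pebs = 2, .n_large_pebs = 1 };

        assert(needs_sched_cb(&all_large));
        assert(!needs_sched_cb(&mixed));
        return 0;
    }
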
1271 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_pebs_sched_task() local
1273 if (!sched_in && pebs_needs_sched_cb(cpuc)) in intel_pmu_pebs_sched_task()
1277 static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) in pebs_update_threshold() argument
1279 struct debug_store *ds = cpuc->ds; in pebs_update_threshold()
1280 int max_pebs_events = intel_pmu_max_num_pebs(cpuc->pmu); in pebs_update_threshold()
1284 if (cpuc->n_pebs_via_pt) in pebs_update_threshold()
1288 reserved = max_pebs_events + x86_pmu_max_num_counters_fixed(cpuc->pmu); in pebs_update_threshold()
1292 if (cpuc->n_pebs == cpuc->n_large_pebs) { in pebs_update_threshold()
1294 reserved * cpuc->pebs_record_size; in pebs_update_threshold()
1296 threshold = ds->pebs_buffer_base + cpuc->pebs_record_size; in pebs_update_threshold()
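
The two assignments at 1294 and 1296 are the entire threshold policy: in large-PEBS mode the interrupt threshold sits 'reserved' records into the buffer so many records accumulate per PMI, otherwise it sits one record in and every record raises a PMI. A sketch of just that arithmetic; all values here are made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t base        = 0x1000;   /* ds->pebs_buffer_base (made up) */
        uint64_t record_size = 48;       /* cpuc->pebs_record_size (made up) */
        uint64_t reserved    = 8 + 4;    /* GP + fixed PEBS counters (made up) */

        /* Large PEBS: PMI only once 'reserved' records have been logged. */
        uint64_t large_threshold  = base + reserved * record_size;
        /* Single PEBS: PMI after every record. */
        uint64_t single_threshold = base + record_size;

        printf("large: %#llx, single: %#llx\n",
               (unsigned long long)large_threshold,
               (unsigned long long)single_threshold);
        return 0;
    }
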
1317 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in adaptive_pebs_record_size_update() local
1318 u64 pebs_data_cfg = cpuc->pebs_data_cfg; in adaptive_pebs_record_size_update()
1343 cpuc->pebs_record_size = sz; in adaptive_pebs_record_size_update()
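
adaptive_pebs_record_size_update() derives cpuc->pebs_record_size from which PEBS_DATACFG groups are enabled: a basic group is always present, and meminfo, GPR, XMM and LBR groups are added on demand. A rough stand-alone model of that accumulation; the CFG_* bits and all byte sizes below are illustrative stand-ins, not the architectural values:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the PEBS_DATACFG_* bits (values illustrative). */
    #define CFG_MEMINFO (1u << 0)
    #define CFG_GP      (1u << 1)
    #define CFG_XMMS    (1u << 2)
    #define CFG_LBRS    (1u << 3)

    static uint64_t record_size(uint32_t cfg, int nr_lbr)
    {
        uint64_t sz = 32;                         /* basic group, always logged */

        if (cfg & CFG_MEMINFO) sz += 32;          /* memory access info */
        if (cfg & CFG_GP)      sz += 144;         /* general-purpose registers */
        if (cfg & CFG_XMMS)    sz += 256;         /* XMM registers */
        if (cfg & CFG_LBRS)    sz += nr_lbr * 24; /* one lbr_entry is 3 u64s */
        return sz;
    }

    int main(void)
    {
        printf("%llu bytes per record\n",
               (unsigned long long)record_size(CFG_MEMINFO | CFG_LBRS, 32));
        return 0;
    }
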
1363 void intel_pmu_pebs_late_setup(struct cpu_hw_events *cpuc) in intel_pmu_pebs_late_setup() argument
1369 for (i = 0; i < cpuc->n_events; i++) { in intel_pmu_pebs_late_setup()
1370 event = cpuc->event_list[i]; in intel_pmu_pebs_late_setup()
1373 __intel_pmu_pebs_update_cfg(event, cpuc->assign[i], &pebs_data_cfg); in intel_pmu_pebs_late_setup()
1376 if (pebs_data_cfg & ~cpuc->pebs_data_cfg) in intel_pmu_pebs_late_setup()
1377 cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; in intel_pmu_pebs_late_setup()
1435 pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, in pebs_update_state() argument
1445 if ((cpuc->n_pebs == 1) && add) in pebs_update_state()
1446 cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW; in pebs_update_state()
1448 if (needed_cb != pebs_needs_sched_cb(cpuc)) { in pebs_update_state()
1454 cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW; in pebs_update_state()
1468 if (pebs_data_cfg & ~cpuc->pebs_data_cfg) in pebs_update_state()
1469 cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; in pebs_update_state()
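
intel_pmu_pebs_late_setup() (line 1376) and pebs_update_state() (line 1468) share the same grow-and-mark-dirty idiom: the config is extended only when the wanted value carries bits that are not active yet, and PEBS_UPDATE_DS_SW flags that software must rewrite the DS area on the next enable. A minimal model; UPDATE_DS_SW here is a stand-in bit:

    #include <stdint.h>
    #include <stdio.h>

    #define UPDATE_DS_SW (1ull << 63)    /* stand-in for PEBS_UPDATE_DS_SW */

    static void merge_cfg(uint64_t *cur, uint64_t wanted)
    {
        /* Grow only if 'wanted' has bits not active yet; the dirty flag
         * tells the enable path to redo the DS-area setup in software. */
        if (wanted & ~*cur)
            *cur |= wanted | UPDATE_DS_SW;
    }

    int main(void)
    {
        uint64_t cfg = 0x3;

        merge_cfg(&cfg, 0x1);    /* subset of current: no change, stays clean */
        printf("%#llx\n", (unsigned long long)cfg);
        merge_cfg(&cfg, 0x4);    /* new bit: grow and mark dirty */
        printf("%#llx\n", (unsigned long long)cfg);
        return 0;
    }
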
1475 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_pebs_add() local
1477 bool needed_cb = pebs_needs_sched_cb(cpuc); in intel_pmu_pebs_add()
1479 cpuc->n_pebs++; in intel_pmu_pebs_add()
1481 cpuc->n_large_pebs++; in intel_pmu_pebs_add()
1483 cpuc->n_pebs_via_pt++; in intel_pmu_pebs_add()
1485 pebs_update_state(needed_cb, cpuc, event, true); in intel_pmu_pebs_add()
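
intel_pmu_pebs_add() samples pebs_needs_sched_cb() before bumping the counters, so pebs_update_state() can react to the old-versus-new transition; intel_pmu_pebs_del() further down mirrors it with decrements. Both directions folded into one sketch; counts and pebs_account are stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    struct counts { int n_pebs, n_large_pebs, n_pebs_via_pt; };

    static void pebs_account(struct counts *c, bool large, bool via_pt, int dir)
    {
        c->n_pebs += dir;                    /* dir is +1 on add, -1 on del */
        if (large)  c->n_large_pebs += dir;
        if (via_pt) c->n_pebs_via_pt += dir;
        /* ...after which pebs_update_state() compares the old and new
         * pebs_needs_sched_cb() answer and updates threshold/callback. */
    }

    int main(void)
    {
        struct counts c = { 0 };

        pebs_account(&c, true, false, +1);   /* like intel_pmu_pebs_add() */
        pebs_account(&c, true, false, -1);   /* like intel_pmu_pebs_del() */
        printf("%d %d %d\n", c.n_pebs, c.n_large_pebs, c.n_pebs_via_pt);
        return 0;
    }
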
1490 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_pebs_via_pt_disable() local
1495 if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK)) in intel_pmu_pebs_via_pt_disable()
1496 cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK; in intel_pmu_pebs_via_pt_disable()
1501 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_pebs_via_pt_enable() local
1503 struct debug_store *ds = cpuc->ds; in intel_pmu_pebs_via_pt_enable()
1512 cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD; in intel_pmu_pebs_via_pt_enable()
1514 cpuc->pebs_enabled |= PEBS_OUTPUT_PT; in intel_pmu_pebs_via_pt_enable()
1527 static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc) in intel_pmu_drain_large_pebs() argument
1529 if (cpuc->n_pebs == cpuc->n_large_pebs && in intel_pmu_drain_large_pebs()
1530 cpuc->n_pebs != cpuc->n_pebs_via_pt) in intel_pmu_drain_large_pebs()
1536 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_pebs_enable() local
1537 u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW; in intel_pmu_pebs_enable()
1539 struct debug_store *ds = cpuc->ds; in intel_pmu_pebs_enable()
1544 cpuc->pebs_enabled |= 1ULL << hwc->idx; in intel_pmu_pebs_enable()
1547 cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32); in intel_pmu_pebs_enable()
1549 cpuc->pebs_enabled |= 1ULL << 63; in intel_pmu_pebs_enable()
1553 if (pebs_data_cfg != cpuc->active_pebs_data_cfg) { in intel_pmu_pebs_enable()
1562 cpuc->active_pebs_data_cfg = pebs_data_cfg; in intel_pmu_pebs_enable()
1565 if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) { in intel_pmu_pebs_enable()
1566 cpuc->pebs_data_cfg = pebs_data_cfg; in intel_pmu_pebs_enable()
1567 pebs_update_threshold(cpuc); in intel_pmu_pebs_enable()
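
Lines 1544-1549 show how intel_pmu_pebs_enable() packs three facts into the one cpuc->pebs_enabled MSR image: bit hwc->idx arms PEBS on that counter, bit hwc->idx + 32 requests load-latency data for it, and bit 63 selects precise stores. A stand-alone model of the mask construction; the enum names stand in for the PERF_X86_EVENT_* flags:

    #include <stdint.h>
    #include <stdio.h>

    enum pebs_kind { PLAIN, LOAD_LATENCY, PRECISE_STORE };

    /* Model of how the enable path builds the pebs_enabled image. */
    static uint64_t pebs_enable_bits(int idx, enum pebs_kind kind)
    {
        uint64_t mask = 1ull << idx;          /* arm PEBS on counter idx */

        if (kind == LOAD_LATENCY)
            mask |= 1ull << (idx + 32);       /* load-latency companion bit */
        else if (kind == PRECISE_STORE)
            mask |= 1ull << 63;               /* precise-store bit */
        return mask;
    }

    int main(void)
    {
        printf("%#llx\n",
               (unsigned long long)pebs_enable_bits(0, LOAD_LATENCY));
        return 0;
    }
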
1593 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_pebs_del() local
1595 bool needed_cb = pebs_needs_sched_cb(cpuc); in intel_pmu_pebs_del()
1597 cpuc->n_pebs--; in intel_pmu_pebs_del()
1599 cpuc->n_large_pebs--; in intel_pmu_pebs_del()
1601 cpuc->n_pebs_via_pt--; in intel_pmu_pebs_del()
1603 pebs_update_state(needed_cb, cpuc, event, false); in intel_pmu_pebs_del()
1608 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_pebs_disable() local
1611 intel_pmu_drain_large_pebs(cpuc); in intel_pmu_pebs_disable()
1613 cpuc->pebs_enabled &= ~(1ULL << hwc->idx); in intel_pmu_pebs_disable()
1617 cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); in intel_pmu_pebs_disable()
1619 cpuc->pebs_enabled &= ~(1ULL << 63); in intel_pmu_pebs_disable()
1623 if (cpuc->enabled) in intel_pmu_pebs_disable()
1624 wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); in intel_pmu_pebs_disable()
1631 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_pebs_enable_all() local
1633 if (cpuc->pebs_enabled) in intel_pmu_pebs_enable_all()
1634 wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); in intel_pmu_pebs_enable_all()
1639 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_pebs_disable_all() local
1641 if (cpuc->pebs_enabled) in intel_pmu_pebs_disable_all()
1647 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_pebs_fixup_ip() local
1648 unsigned long from = cpuc->lbr_entries[0].from; in intel_pmu_pebs_fixup_ip()
1649 unsigned long old_to, to = cpuc->lbr_entries[0].to; in intel_pmu_pebs_fixup_ip()
1664 if (!cpuc->lbr_stack.nr || !from || !to) in intel_pmu_pebs_fixup_ip()
1822 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in setup_pebs_fixed_sample_data() local
1956 perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); in setup_pebs_fixed_sample_data()
2024 static inline void __setup_pebs_counter_group(struct cpu_hw_events *cpuc, in __setup_pebs_counter_group() argument
2032 intel_perf_event_update_pmc(cpuc->events[bit], *(u64 *)next_record); in __setup_pebs_counter_group()
2043 intel_perf_event_update_pmc(cpuc->events[bit + INTEL_PMC_IDX_FIXED], in __setup_pebs_counter_group()
2054 (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS], in __setup_pebs_counter_group()
2070 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in setup_pebs_adaptive_sample_data() local
2189 intel_pmu_lbr_save_brstack(data, cpuc, event); in setup_pebs_adaptive_sample_data()
2205 __setup_pebs_counter_group(cpuc, event, cntr, next_record); in setup_pebs_adaptive_sample_data()
2225 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in get_next_pebs_record_by_bit() local
2239 for (at = base; at < top; at += cpuc->pebs_record_size) { in get_next_pebs_record_by_bit()
2251 pebs_status = status & cpuc->pebs_enabled; in get_next_pebs_record_by_bit()
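
get_next_pebs_record_by_bit() walks the buffer in cpuc->pebs_record_size strides and masks each record's status word with the software copy of pebs_enabled, so status bits for counters the kernel never armed are ignored. A stride-walk sketch over a flat buffer; the record layout and values are made up:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Walk rec_size-byte records between base and top; the first u64 of
     * each record plays the role of the PEBS status word. */
    static int count_hits(const void *base, const void *top,
                          size_t rec_size, uint64_t enabled, int bit)
    {
        const char *at;
        int hits = 0;

        for (at = base; at < (const char *)top; at += rec_size) {
            uint64_t status = *(const uint64_t *)at & enabled;

            if (status & (1ull << bit))
                hits++;
        }
        return hits;
    }

    int main(void)
    {
        /* Two 16-byte records, status word first (made-up layout). */
        uint64_t buf[4] = { 1ull << 2, 0, 1ull << 2, 0 };

        printf("%d\n", count_hits(buf, buf + 4, 16, 1ull << 2, 2));
        return 0;
    }
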
2411 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in __intel_pmu_pebs_events() local
2422 at += cpuc->pebs_record_size; in __intel_pmu_pebs_events()
2432 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_drain_pebs_core() local
2433 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_pebs_core()
2434 struct perf_event *event = cpuc->events[0]; /* PMC0 only */ in intel_pmu_drain_pebs_core()
2449 if (!test_bit(0, cpuc->active_mask)) in intel_pmu_drain_pebs_core()
2468 static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask) in intel_pmu_pebs_event_update_no_drain() argument
2470 u64 pebs_enabled = cpuc->pebs_enabled & mask; in intel_pmu_pebs_event_update_no_drain()
2482 event = cpuc->events[bit]; in intel_pmu_pebs_event_update_no_drain()
2490 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_drain_pebs_nhm() local
2491 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_pebs_nhm()
2516 intel_pmu_pebs_event_update_no_drain(cpuc, mask); in intel_pmu_drain_pebs_nhm()
2524 pebs_status = p->status & cpuc->pebs_enabled; in intel_pmu_drain_pebs_nhm()
2543 if (!pebs_status && cpuc->pebs_enabled && in intel_pmu_drain_pebs_nhm()
2544 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) in intel_pmu_drain_pebs_nhm()
2545 pebs_status = p->status = cpuc->pebs_enabled; in intel_pmu_drain_pebs_nhm()
2581 event = cpuc->events[bit]; in intel_pmu_drain_pebs_nhm()
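
Lines 2543-2545 are an erratum workaround: a record's status word may be zero even though a counter overflowed, and if exactly one PEBS counter is enabled the record can still be attributed to it. The at-most-one-bit test is the classic x & (x - 1); a tiny demonstration:

    #include <assert.h>
    #include <stdint.h>

    /* True iff at most one bit is set: clearing the lowest set bit
     * with x & (x - 1) must leave nothing behind. */
    static int at_most_one_bit(uint64_t x)
    {
        return (x & (x - 1)) == 0;
    }

    int main(void)
    {
        assert(at_most_one_bit(0));
        assert(at_most_one_bit(1ull << 5));
        assert(!at_most_one_bit((1ull << 5) | 1));
        return 0;
    }
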
2608 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_drain_pebs_icl() local
2609 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_pebs_icl()
2626 mask = hybrid(cpuc->pmu, pebs_events_mask) | in intel_pmu_drain_pebs_icl()
2627 (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED); in intel_pmu_drain_pebs_icl()
2630 intel_pmu_pebs_event_update_no_drain(cpuc, mask); in intel_pmu_drain_pebs_icl()
2642 if (basic->format_size != cpuc->pebs_record_size) in intel_pmu_drain_pebs_icl()
2645 pebs_status = basic->applicable_counters & cpuc->pebs_enabled & mask; in intel_pmu_drain_pebs_icl()
2647 event = cpuc->events[bit]; in intel_pmu_drain_pebs_icl()
2665 event = cpuc->events[bit]; in intel_pmu_drain_pebs_icl()
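
The Icelake drain builds its counter mask from the hybrid PMU's general-counter mask plus the fixed-counter mask shifted up by INTEL_PMC_IDX_FIXED (lines 2626-2627), then intersects each record's applicable_counters with pebs_enabled (line 2645). A sketch of that mask algebra; IDX_FIXED stands in for INTEL_PMC_IDX_FIXED and the counter counts are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define IDX_FIXED 32    /* stand-in for INTEL_PMC_IDX_FIXED */

    int main(void)
    {
        uint64_t gp_mask    = 0xff;   /* eight general counters (made up) */
        uint64_t fixed_mask = 0x7;    /* three fixed counters (made up) */
        uint64_t mask       = gp_mask | (fixed_mask << IDX_FIXED);

        uint64_t applicable = (1ull << 3) | (1ull << IDX_FIXED);
        uint64_t enabled    = 0xf | (1ull << IDX_FIXED);

        /* Only counters that are applicable, PEBS-enabled and within the
         * PMU's mask get a sample delivered to their event. */
        printf("%#llx\n", (unsigned long long)(applicable & enabled & mask));
        return 0;
    }
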