Lines matching refs:cpuc — cross-reference hits for the per-CPU struct cpu_hw_events pointer (cpuc) in the x86 perf core (arch/x86/events/core.c); each entry gives the source line number, the matching code, and the enclosing function, with declarations flagged local or argument.

694 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_disable_all() local
698 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_disable_all()
701 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_disable_all()
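
The three x86_pmu_disable_all() hits above fit together as one loop over the general-purpose counters: take the per-CPU cpu_hw_events, skip any counter index not set in active_mask, and clear the enable bit in that counter's event-select MSR. A minimal sketch assembled from those fragments; the counter-mask iteration and the MSR read-modify-write are filled in from context, not from the listing:

static void x86_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;

        for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                /* clear the enable bit in the counter's event-select MSR */
                rdmsrl(hwc->config_base, val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(hwc->config_base, val);
        }
}
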
734 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_disable() local
739 if (!cpuc->enabled) in x86_pmu_disable()
742 cpuc->n_added = 0; in x86_pmu_disable()
743 cpuc->enabled = 0; in x86_pmu_disable()
751 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_enable_all() local
755 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_enable_all()
757 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_enable_all()
782 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in x86_get_pmu() local
788 if (WARN_ON_ONCE(!cpuc->pmu)) in x86_get_pmu()
791 return cpuc->pmu; in x86_get_pmu()
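
x86_get_pmu() is a simple per-CPU lookup: fetch the cpu_hw_events for the requested CPU and return its cached pmu pointer, warning once if it was never set. A sketch consistent with the three hits above; the fallback to the global pmu on the WARN path is an assumption:

struct pmu *x86_get_pmu(unsigned int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        /*
         * cpuc->pmu is filled in while the CPU is brought up; a NULL
         * pointer here means the caller asked before initialization.
         */
        if (WARN_ON_ONCE(!cpuc->pmu))
                return &pmu;

        return cpuc->pmu;
}
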
992 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) in x86_schedule_events() argument
1007 n0 = cpuc->n_events; in x86_schedule_events()
1008 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_schedule_events()
1009 n0 -= cpuc->n_txn; in x86_schedule_events()
1011 static_call_cond(x86_pmu_start_scheduling)(cpuc); in x86_schedule_events()
1014 c = cpuc->event_constraint[i]; in x86_schedule_events()
1028 c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]); in x86_schedule_events()
1029 cpuc->event_constraint[i] = c; in x86_schedule_events()
1042 hwc = &cpuc->event_list[i]->hw; in x86_schedule_events()
1043 c = cpuc->event_constraint[i]; in x86_schedule_events()
1069 int gpmax = x86_pmu_max_num_counters(cpuc->pmu); in x86_schedule_events()
1081 if (is_ht_workaround_enabled() && !cpuc->is_fake && in x86_schedule_events()
1082 READ_ONCE(cpuc->excl_cntrs->exclusive_present)) in x86_schedule_events()
1090 gpmax -= cpuc->n_pair; in x86_schedule_events()
1094 unsched = perf_assign_events(cpuc->event_constraint, n, wmin, in x86_schedule_events()
1110 static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]); in x86_schedule_events()
1113 e = cpuc->event_list[i]; in x86_schedule_events()
1118 static_call_cond(x86_pmu_put_event_constraints)(cpuc, e); in x86_schedule_events()
1120 cpuc->event_constraint[i] = NULL; in x86_schedule_events()
1124 static_call_cond(x86_pmu_stop_scheduling)(cpuc); in x86_schedule_events()
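
Taken together, the x86_schedule_events() hits trace the constraint-scheduling pass: events committed before an in-flight PERF_PMU_TXN_ADD transaction keep their slots (n0 at 1007-1009), the start_scheduling hook runs, each event gets a constraint (cached in cpuc->event_constraint[] or fetched via the get_event_constraints hook), a fast path tries to keep every event on its previous counter, and the slow path calls perf_assign_events() with the counter budget gpmax halved for the HT erratum workaround and reduced by cpuc->n_pair for large-increment (paired) events; on failure the put_event_constraints hook releases every collected constraint. A condensed sketch with the fast path and the unwind path elided; the dynamic-constraint test, the weight bookkeeping, and the PMU_FL_PAIR check are filled in from context:

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
        struct event_constraint *c;
        int i, wmin, wmax, unsched;
        int gpmax = x86_pmu_max_num_counters(cpuc->pmu);

        static_call_cond(x86_pmu_start_scheduling)(cpuc);

        /* collect a constraint per event, reusing cached static constraints */
        for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
                c = cpuc->event_constraint[i];
                if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
                        c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]);
                        cpuc->event_constraint[i] = c;
                }
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
        }

        /* (fast path elided: reuse each event's previous counter if possible) */

        /* leave half of the counters to the HT sibling when exclusive events exist */
        if (is_ht_workaround_enabled() && !cpuc->is_fake &&
            READ_ONCE(cpuc->excl_cntrs->exclusive_present))
                gpmax /= 2;

        /* paired (large-increment) events each consume an extra counter */
        if (x86_pmu.flags & PMU_FL_PAIR)
                gpmax -= cpuc->n_pair;

        unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
                                     wmax, gpmax, assign);

        /* (commit the assignment, or unwind the collected constraints) */
        static_call_cond(x86_pmu_stop_scheduling)(cpuc);

        return unsched ? -EINVAL : 0;
}
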
1129 static int add_nr_metric_event(struct cpu_hw_events *cpuc, in add_nr_metric_event() argument
1133 if (cpuc->n_metric == INTEL_TD_METRIC_NUM) in add_nr_metric_event()
1135 cpuc->n_metric++; in add_nr_metric_event()
1136 cpuc->n_txn_metric++; in add_nr_metric_event()
1142 static void del_nr_metric_event(struct cpu_hw_events *cpuc, in del_nr_metric_event() argument
1146 cpuc->n_metric--; in del_nr_metric_event()
1149 static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event, in collect_event() argument
1152 union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); in collect_event()
1154 if (intel_cap.perf_metrics && add_nr_metric_event(cpuc, event)) in collect_event()
1157 if (n >= max_count + cpuc->n_metric) in collect_event()
1160 cpuc->event_list[n] = event; in collect_event()
1162 cpuc->n_pair++; in collect_event()
1163 cpuc->n_txn_pair++; in collect_event()
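
The collect_event() lines show the per-event admission check used while building cpuc->event_list[]: on a PMU with perf_metrics, topdown metric events are accounted separately through add_nr_metric_event() (bounded by INTEL_TD_METRIC_NUM), the slot index is checked against max_count plus the metric allowance, and paired large-increment events bump n_pair/n_txn_pair. A sketch assembled from those fragments; the -EINVAL returns and the is_counter_pair() test are assumptions based on context:

static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
                         int max_count, int n)
{
        union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap);

        if (intel_cap.perf_metrics && add_nr_metric_event(cpuc, event))
                return -EINVAL;

        if (n >= max_count + cpuc->n_metric)
                return -EINVAL;

        cpuc->event_list[n] = event;
        if (is_counter_pair(&event->hw)) {
                cpuc->n_pair++;
                cpuc->n_txn_pair++;
        }

        return 0;
}
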
1173 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp) in collect_events() argument
1178 max_count = x86_pmu_num_counters(cpuc->pmu) + x86_pmu_num_counters_fixed(cpuc->pmu); in collect_events()
1181 n = cpuc->n_events; in collect_events()
1182 if (!cpuc->n_events) in collect_events()
1183 cpuc->pebs_output = 0; in collect_events()
1185 if (!cpuc->is_fake && leader->attr.precise_ip) { in collect_events()
1197 if (cpuc->pebs_output && in collect_events()
1198 cpuc->pebs_output != is_pebs_pt(leader) + 1) in collect_events()
1201 cpuc->pebs_output = is_pebs_pt(leader) + 1; in collect_events()
1205 if (collect_event(cpuc, leader, max_count, n)) in collect_events()
1217 if (collect_event(cpuc, event, max_count, n)) in collect_events()
1226 struct cpu_hw_events *cpuc, int i) in x86_assign_hw_event() argument
1231 idx = hwc->idx = cpuc->assign[i]; in x86_assign_hw_event()
1233 hwc->last_tag = ++cpuc->tags[i]; in x86_assign_hw_event()
1285 struct cpu_hw_events *cpuc, in match_prev_assignment() argument
1288 return hwc->idx == cpuc->assign[i] && in match_prev_assignment()
1290 hwc->last_tag == cpuc->tags[i]; in match_prev_assignment()
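
match_prev_assignment() is the small predicate behind the lazy reprogramming in x86_pmu_enable(): an event may keep its hardware counter only if its idx still equals cpuc->assign[i] and its generation tag still equals cpuc->tags[i] (the tag is bumped by x86_assign_hw_event() at line 1233 above). Reconstructed from the hits; the middle condition, a check that the event last ran on this CPU, is not in the listing and is filled in from context:

static inline int match_prev_assignment(struct hw_perf_event *hwc,
                                        struct cpu_hw_events *cpuc,
                                        int i)
{
        return hwc->idx == cpuc->assign[i] &&
               hwc->last_cpu == smp_processor_id() &&
               hwc->last_tag == cpuc->tags[i];
}
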
1297 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_enable() local
1300 int i, added = cpuc->n_added; in x86_pmu_enable()
1305 if (cpuc->enabled) in x86_pmu_enable()
1308 if (cpuc->n_added) { in x86_pmu_enable()
1309 int n_running = cpuc->n_events - cpuc->n_added; in x86_pmu_enable()
1326 event = cpuc->event_list[i]; in x86_pmu_enable()
1336 match_prev_assignment(hwc, cpuc, i)) in x86_pmu_enable()
1352 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_enable()
1353 event = cpuc->event_list[i]; in x86_pmu_enable()
1356 if (!match_prev_assignment(hwc, cpuc, i)) in x86_pmu_enable()
1357 x86_assign_hw_event(event, cpuc, i); in x86_pmu_enable()
1370 cpuc->n_added = 0; in x86_pmu_enable()
1374 cpuc->enabled = 1; in x86_pmu_enable()
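
The x86_pmu_enable() hits are the consumer of that predicate: when events were added since the last disable (cpuc->n_added != 0), a first pass stops every already-running event whose counter assignment changed, a second pass programs and starts events on their new counters, and only then is cpuc->enabled set before the enable_all hook runs. A condensed sketch of the two passes under those assumptions; the PERF_HES_* state handling and the LAPIC re-init are abbreviated:

static void x86_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int i, added = cpuc->n_added;

        if (cpuc->enabled)
                return;

        if (cpuc->n_added) {
                int n_running = cpuc->n_events - cpuc->n_added;

                /* pass 1: stop running events whose counter assignment changed */
                for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
                        if (hwc->idx == -1 || match_prev_assignment(hwc, cpuc, i))
                                continue;
                        x86_pmu_stop(event, PERF_EF_UPDATE);
                }

                /* pass 2: program and start events on their (new) counters */
                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
                        if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
                        else if (i < n_running)         /* untouched, still running */
                                continue;
                        x86_pmu_start(event, PERF_EF_RELOAD);
                }
                cpuc->n_added = 0;
        }

        cpuc->enabled = 1;
        barrier();
        static_call(x86_pmu_enable_all)(added);
}
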
1460 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_add() local
1467 n0 = cpuc->n_events; in x86_pmu_add()
1468 ret = n = collect_events(cpuc, event, false); in x86_pmu_add()
1484 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_pmu_add()
1487 ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign); in x86_pmu_add()
1494 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_add()
1501 cpuc->n_events = n; in x86_pmu_add()
1502 cpuc->n_added += n - n0; in x86_pmu_add()
1503 cpuc->n_txn += n - n0; in x86_pmu_add()
1518 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_start() local
1534 cpuc->events[idx] = event; in x86_pmu_start()
1535 __set_bit(idx, cpuc->active_mask); in x86_pmu_start()
1545 struct cpu_hw_events *cpuc; in perf_event_print_debug() local
1552 cpuc = &per_cpu(cpu_hw_events, cpu); in perf_event_print_debug()
1553 cntr_mask = hybrid(cpuc->pmu, cntr_mask); in perf_event_print_debug()
1554 fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask); in perf_event_print_debug()
1555 pebs_constraints = hybrid(cpuc->pmu, pebs_constraints); in perf_event_print_debug()
1580 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); in perf_event_print_debug()
1596 if (fixed_counter_disabled(idx, cpuc->pmu)) in perf_event_print_debug()
1607 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_stop() local
1610 if (test_bit(hwc->idx, cpuc->active_mask)) { in x86_pmu_stop()
1612 __clear_bit(hwc->idx, cpuc->active_mask); in x86_pmu_stop()
1613 cpuc->events[hwc->idx] = NULL; in x86_pmu_stop()
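
x86_pmu_stop() undoes what x86_pmu_start() set up at lines 1534-1535: if the counter is still marked active, disable it, clear it from active_mask and drop the cpuc->events[] back-pointer. A sketch built around the listed fragments; the PERF_HES_* bookkeeping and the final counter read are filled in from context:

void x86_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (test_bit(hwc->idx, cpuc->active_mask)) {
                static_call(x86_pmu_disable)(event);
                __clear_bit(hwc->idx, cpuc->active_mask);
                cpuc->events[hwc->idx] = NULL;
                hwc->state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /* read the counter one last time so event->count is current */
                static_call(x86_pmu_update)(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}
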
1630 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_del() local
1631 union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); in x86_pmu_del()
1642 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_pmu_del()
1645 __set_bit(event->hw.idx, cpuc->dirty); in x86_pmu_del()
1652 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_del()
1653 if (event == cpuc->event_list[i]) in x86_pmu_del()
1657 if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */ in x86_pmu_del()
1661 if (i >= cpuc->n_events - cpuc->n_added) in x86_pmu_del()
1662 --cpuc->n_added; in x86_pmu_del()
1664 static_call_cond(x86_pmu_put_event_constraints)(cpuc, event); in x86_pmu_del()
1667 while (++i < cpuc->n_events) { in x86_pmu_del()
1668 cpuc->event_list[i-1] = cpuc->event_list[i]; in x86_pmu_del()
1669 cpuc->event_constraint[i-1] = cpuc->event_constraint[i]; in x86_pmu_del()
1670 cpuc->assign[i-1] = cpuc->assign[i]; in x86_pmu_del()
1672 cpuc->event_constraint[i-1] = NULL; in x86_pmu_del()
1673 --cpuc->n_events; in x86_pmu_del()
1675 del_nr_metric_event(cpuc, event); in x86_pmu_del()
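
The x86_pmu_del() hits show the software-side teardown: inside a TXN_ADD transaction the removal is deferred to cancel_txn, otherwise the counter is marked dirty, the event is located in cpuc->event_list[] (with a WARN if ->del() arrives without a matching ->add()), n_added is decremented if the event was still in the uncommitted tail, its constraint is released, and the three parallel arrays are compacted so the slots stay densely packed before n_events drops and metric accounting is undone. A sketch of the non-transactional path; the dirty/metric guards around lines 1645 and 1675 and the final per-PMU del hook are paraphrased:

static void x86_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int i;

        if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
                goto do_del;            /* cancel_txn truncates event_list */

        __set_bit(event->hw.idx, cpuc->dirty);
        x86_pmu_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < cpuc->n_events; i++)
                if (event == cpuc->event_list[i])
                        break;

        if (WARN_ON_ONCE(i == cpuc->n_events)) /* ->del() without ->add()? */
                return;

        if (i >= cpuc->n_events - cpuc->n_added)
                --cpuc->n_added;

        static_call_cond(x86_pmu_put_event_constraints)(cpuc, event);

        /* compact the parallel arrays by shifting later entries down a slot */
        while (++i < cpuc->n_events) {
                cpuc->event_list[i-1]       = cpuc->event_list[i];
                cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
                cpuc->assign[i-1]           = cpuc->assign[i];
        }
        cpuc->event_constraint[i-1] = NULL;
        --cpuc->n_events;
        del_nr_metric_event(cpuc, event);

do_del:
        static_call_cond(x86_pmu_del)(event);
}
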
1691 struct cpu_hw_events *cpuc; in x86_pmu_handle_irq() local
1697 cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_handle_irq()
1710 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_handle_irq()
1713 event = cpuc->events[idx]; in x86_pmu_handle_irq()
1730 perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); in x86_pmu_handle_irq()
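
In the NMI handler the same active_mask/events[] pairing appears again: the handler walks the counter indices, skips inactive ones, finds the event that owns each counter, checks for overflow, rearms the period, and emits a sample, saving the LBR stack when the event requested branch records. A stripped-down sketch of that loop; the overflow test on the counter value and the period reload are paraphrased from context, only the three cpuc hits are taken from the listing:

int x86_pmu_handle_irq(struct pt_regs *regs)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_sample_data data;
        struct perf_event *event;
        int idx, handled = 0;
        u64 val;

        for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                event = cpuc->events[idx];

                val = static_call(x86_pmu_update)(event);
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
                        continue;       /* this counter has not overflowed */

                handled++;

                if (!static_call(x86_pmu_set_period)(event))
                        continue;

                perf_sample_data_init(&data, 0, event->hw.last_period);

                if (has_branch_stack(event))
                        perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);

                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }

        return handled;
}
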
1781 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in x86_pmu_prepare_cpu() local
1785 cpuc->kfree_on_online[i] = NULL; in x86_pmu_prepare_cpu()
1800 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in x86_pmu_online_cpu() local
1804 kfree(cpuc->kfree_on_online[i]); in x86_pmu_online_cpu()
1805 cpuc->kfree_on_online[i] = NULL; in x86_pmu_online_cpu()
2245 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_start_txn() local
2247 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */ in x86_pmu_start_txn()
2249 cpuc->txn_flags = txn_flags; in x86_pmu_start_txn()
2267 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_cancel_txn() local
2269 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ in x86_pmu_cancel_txn()
2271 txn_flags = cpuc->txn_flags; in x86_pmu_cancel_txn()
2272 cpuc->txn_flags = 0; in x86_pmu_cancel_txn()
2296 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_commit_txn() local
2300 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ in x86_pmu_commit_txn()
2302 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) { in x86_pmu_commit_txn()
2303 cpuc->txn_flags = 0; in x86_pmu_commit_txn()
2307 n = cpuc->n_events; in x86_pmu_commit_txn()
2312 ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign); in x86_pmu_commit_txn()
2320 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_commit_txn()
2322 cpuc->txn_flags = 0; in x86_pmu_commit_txn()
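
The three transaction entry points tie cpuc->txn_flags and the n_txn counters together: x86_pmu_start_txn() records the flags (warning if a transaction is already in flight), x86_pmu_cancel_txn() clears them and unwinds the uncommitted counts, and x86_pmu_commit_txn() re-runs the scheduler over all n_events and copies the resulting assignment into cpuc->assign before clearing the flags. A sketch of the commit path built from the hits at 2296-2322; the early return for non-ADD transactions and the perf_pmu_enable() pairing are filled in from context:

static int x86_pmu_commit_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int assign[X86_PMC_IDX_MAX];
        int n, ret;

        WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */

        if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
                cpuc->txn_flags = 0;    /* nothing to schedule for non-ADD txns */
                return 0;
        }

        n = cpuc->n_events;
        if (!x86_pmu_initialized())
                return -EAGAIN;

        ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
        if (ret)
                return ret;

        /* copy the new assignment; x86_pmu_enable() will program it */
        memcpy(cpuc->assign, assign, n * sizeof(int));

        cpuc->txn_flags = 0;
        perf_pmu_enable(pmu);
        return 0;
}
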
2334 static void free_fake_cpuc(struct cpu_hw_events *cpuc) in free_fake_cpuc() argument
2336 intel_cpuc_finish(cpuc); in free_fake_cpuc()
2337 kfree(cpuc); in free_fake_cpuc()
2342 struct cpu_hw_events *cpuc; in allocate_fake_cpuc() local
2345 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); in allocate_fake_cpuc()
2346 if (!cpuc) in allocate_fake_cpuc()
2348 cpuc->is_fake = 1; in allocate_fake_cpuc()
2359 cpuc->pmu = event_pmu; in allocate_fake_cpuc()
2361 if (intel_cpuc_prepare(cpuc, cpu)) in allocate_fake_cpuc()
2364 return cpuc; in allocate_fake_cpuc()
2366 free_fake_cpuc(cpuc); in allocate_fake_cpuc()
2497 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in perf_clear_dirty_counters() local
2501 for (i = 0; i < cpuc->n_events; i++) in perf_clear_dirty_counters()
2502 __clear_bit(cpuc->assign[i], cpuc->dirty); in perf_clear_dirty_counters()
2504 if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX)) in perf_clear_dirty_counters()
2507 for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) { in perf_clear_dirty_counters()
2510 if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask))) in perf_clear_dirty_counters()
2519 bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX); in perf_clear_dirty_counters()
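
perf_clear_dirty_counters() consumes the dirty bitmap that x86_pmu_del() set at line 1645: counters still owned by scheduled events are removed from the bitmap first, then every remaining dirty counter is zeroed, fixed counters only if this (possibly hybrid) PMU actually implements them, and finally the bitmap is cleared. A sketch following the hits at 2497-2519; the exact MSRs written for fixed versus general-purpose counters are paraphrased:

void perf_clear_dirty_counters(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int i;

        /* counters that are currently assigned are not stale */
        for (i = 0; i < cpuc->n_events; i++)
                __clear_bit(cpuc->assign[i], cpuc->dirty);

        if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
                return;

        for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
                if (i >= INTEL_PMC_IDX_FIXED) {
                        /* skip fixed counters this PMU does not implement */
                        if (!test_bit(i - INTEL_PMC_IDX_FIXED,
                                      hybrid(cpuc->pmu, fixed_cntr_mask)))
                                continue;
                        wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
                } else {
                        wrmsrl(x86_pmu_event_addr(i), 0);
                }
        }

        bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
}
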