Lines matching refs: cpuc

Every reference to the per-CPU cpuc pointer (a struct cpu_hw_events *) in the Intel LBR code. The left-hand numbers are source line numbers, and the trailing "in function() local/argument" notes name the enclosing function; the file is most likely arch/x86/events/intel/lbr.c in the Linux kernel.
106 static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
123 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in __intel_pmu_lbr_enable() local
137 if (cpuc->lbr_sel) in __intel_pmu_lbr_enable()
138 lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask; in __intel_pmu_lbr_enable()
139 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel) in __intel_pmu_lbr_enable()
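
The __intel_pmu_lbr_enable() hits above show the usual two-step pattern: take this CPU's cpu_hw_events, then apply the event's branch-filter config masked down to the bits the hardware advertises. A minimal userspace sketch of that masking, assuming illustrative names and an assumed mask value (the real code writes the result to the LBR select MSR):

    /* Minimal userspace model of the masking in __intel_pmu_lbr_enable():
     * only apply the event's branch-filter config when one is installed,
     * and mask it to the supported bits first. lbr_sel_mask's value and
     * all names are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    struct lbr_sel_reg { uint64_t config; };

    struct cpu_hw_events_model {
        struct lbr_sel_reg *lbr_sel;   /* NULL: no filter event active */
    };

    static const uint64_t lbr_sel_mask = 0x3ffull;   /* assumed */

    static uint64_t effective_lbr_select(const struct cpu_hw_events_model *cpuc)
    {
        uint64_t lbr_select = 0;

        if (cpuc->lbr_sel)
            lbr_select = cpuc->lbr_sel->config & lbr_sel_mask;
        return lbr_select;   /* the kernel writes this to the select MSR */
    }

    int main(void)
    {
        struct lbr_sel_reg sel = { .config = 0x5c4ull };
        struct cpu_hw_events_model cpuc = { .lbr_sel = &sel };

        printf("select value: %#llx\n",
               (unsigned long long)effective_lbr_select(&cpuc));
        return 0;
    }
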
192 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_lbr_reset() local
199 cpuc->last_task_ctx = NULL; in intel_pmu_lbr_reset()
200 cpuc->last_log_id = 0; in intel_pmu_lbr_reset()
201 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && cpuc->lbr_select) in intel_pmu_lbr_reset()
362 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_lbr_restore() local
385 if (cpuc->lbr_select) in intel_pmu_lbr_restore()
427 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in __intel_pmu_lbr_restore() local
440 if ((ctx == cpuc->last_task_ctx) && in __intel_pmu_lbr_restore()
441 (task_context_opt(ctx)->log_id == cpuc->last_log_id) && in __intel_pmu_lbr_restore()
454 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_lbr_save() local
471 if (cpuc->lbr_select) in intel_pmu_lbr_save()
504 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in __intel_pmu_lbr_save() local
515 cpuc->last_task_ctx = ctx; in __intel_pmu_lbr_save()
516 cpuc->last_log_id = ++task_context_opt(ctx)->log_id; in __intel_pmu_lbr_save()
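
Together, the __intel_pmu_lbr_restore() and __intel_pmu_lbr_save() hits implement a context-switch fast path: save records which task context was written out last and bumps a log_id, and restore may skip the expensive MSR reload when the incoming context and log_id still match. A simplified userspace model (the kernel's check includes an extra hardware-state test not shown here; all names below are illustrative):

    /* Userspace model of the last_task_ctx/log_id fast path seen in
     * __intel_pmu_lbr_save()/__intel_pmu_lbr_restore(): skip the costly
     * restore when switching back to the very context we last saved,
     * with nothing saved in between. */
    #include <stdbool.h>
    #include <stdio.h>

    struct task_ctx { unsigned long log_id; };

    struct cpu_state {
        struct task_ctx *last_task_ctx;
        unsigned long    last_log_id;
    };

    static void lbr_save(struct cpu_state *cpuc, struct task_ctx *ctx)
    {
        /* ... the real code writes the LBR MSRs into ctx here ... */
        cpuc->last_task_ctx = ctx;
        cpuc->last_log_id   = ++ctx->log_id;
    }

    static bool lbr_restore_can_skip(struct cpu_state *cpuc, struct task_ctx *ctx)
    {
        return ctx == cpuc->last_task_ctx &&
               ctx->log_id == cpuc->last_log_id;
    }

    int main(void)
    {
        struct cpu_state cpuc = { 0 };
        struct task_ctx a = { 0 }, b = { 0 };

        lbr_save(&cpuc, &a);
        printf("restore a: skip=%d\n", lbr_restore_can_skip(&cpuc, &a)); /* 1 */
        lbr_save(&cpuc, &b);
        printf("restore a: skip=%d\n", lbr_restore_can_skip(&cpuc, &a)); /* 0 */
        return 0;
    }
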
544 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_lbr_sched_task() local
547 if (!cpuc->lbr_users) in intel_pmu_lbr_sched_task()
581 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_lbr_add() local
587 cpuc->lbr_select = 1; in intel_pmu_lbr_add()
589 cpuc->br_sel = event->hw.branch_reg.reg; in intel_pmu_lbr_add()
591 if (branch_user_callstack(cpuc->br_sel) && event->pmu_ctx->task_ctx_data) in intel_pmu_lbr_add()
614 cpuc->lbr_pebs_users++; in intel_pmu_lbr_add()
616 if (!cpuc->lbr_users++ && !event->total_time_running) in intel_pmu_lbr_add()
623 struct cpu_hw_events *cpuc; in release_lbr_buffers() local
630 cpuc = per_cpu_ptr(&cpu_hw_events, cpu); in release_lbr_buffers()
632 if (kmem_cache && cpuc->lbr_xsave) { in release_lbr_buffers()
633 kmem_cache_free(kmem_cache, cpuc->lbr_xsave); in release_lbr_buffers()
634 cpuc->lbr_xsave = NULL; in release_lbr_buffers()
642 struct cpu_hw_events *cpuc; in reserve_lbr_buffers() local
649 cpuc = per_cpu_ptr(&cpu_hw_events, cpu); in reserve_lbr_buffers()
651 if (!kmem_cache || cpuc->lbr_xsave) in reserve_lbr_buffers()
654 cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, in reserve_lbr_buffers()
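
The release_lbr_buffers()/reserve_lbr_buffers() hits show per-CPU lazy buffer management: reserve allocates an lbr_xsave buffer only for CPUs that lack one, and release frees and NULLs it so the pair stays idempotent. A userspace model, with malloc/free and a fixed CPU count standing in for the kmem_cache and per_cpu_ptr():

    /* Userspace model of reserve_lbr_buffers()/release_lbr_buffers():
     * one buffer per CPU, allocated lazily and idempotently, freed and
     * NULLed on release so a later reserve can run again. */
    #include <stdlib.h>

    #define NR_CPUS_MODEL 4

    struct cpu_state { void *lbr_xsave; };
    static struct cpu_state cpu_hw_events_model[NR_CPUS_MODEL];

    static void reserve_buffers(size_t size)
    {
        for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++) {
            struct cpu_state *cpuc = &cpu_hw_events_model[cpu];

            if (cpuc->lbr_xsave)       /* already reserved */
                continue;
            cpuc->lbr_xsave = malloc(size);
        }
    }

    static void release_buffers(void)
    {
        for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++) {
            struct cpu_state *cpuc = &cpu_hw_events_model[cpu];

            free(cpuc->lbr_xsave);
            cpuc->lbr_xsave = NULL;    /* make release idempotent */
        }
    }

    int main(void)
    {
        reserve_buffers(4096);
        release_buffers();
        release_buffers();             /* safe: free(NULL) is a no-op */
        return 0;
    }
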
662 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_lbr_del() local
667 if (branch_user_callstack(cpuc->br_sel) && in intel_pmu_lbr_del()
672 cpuc->lbr_select = 0; in intel_pmu_lbr_del()
675 cpuc->lbr_pebs_users--; in intel_pmu_lbr_del()
676 cpuc->lbr_users--; in intel_pmu_lbr_del()
677 WARN_ON_ONCE(cpuc->lbr_users < 0); in intel_pmu_lbr_del()
678 WARN_ON_ONCE(cpuc->lbr_pebs_users < 0); in intel_pmu_lbr_del()
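
intel_pmu_lbr_add() and intel_pmu_lbr_del() keep two reference counts: every LBR event holds lbr_users, and PEBS events additionally hold lbr_pebs_users; del warns on underflow instead of wrapping silently. A sketch of the accounting, with assert() standing in for WARN_ON_ONCE() (illustrative names):

    /* Model of the lbr_users/lbr_pebs_users accounting in
     * intel_pmu_lbr_add()/intel_pmu_lbr_del(). */
    #include <assert.h>
    #include <stdbool.h>

    struct cpu_state { int lbr_users, lbr_pebs_users; };

    static void lbr_add(struct cpu_state *cpuc, bool pebs)
    {
        if (pebs)
            cpuc->lbr_pebs_users++;
        cpuc->lbr_users++;
    }

    static void lbr_del(struct cpu_state *cpuc, bool pebs)
    {
        if (pebs)
            cpuc->lbr_pebs_users--;
        cpuc->lbr_users--;
        assert(cpuc->lbr_users >= 0);       /* WARN_ON_ONCE in the kernel */
        assert(cpuc->lbr_pebs_users >= 0);
    }

    int main(void)
    {
        struct cpu_state cpuc = { 0 };

        lbr_add(&cpuc, true);
        lbr_add(&cpuc, false);
        lbr_del(&cpuc, false);
        lbr_del(&cpuc, true);
        assert(cpuc.lbr_users == 0 && cpuc.lbr_pebs_users == 0);
        return 0;
    }
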
703 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in vlbr_exclude_host() local
706 (unsigned long *)&cpuc->intel_ctrl_guest_mask); in vlbr_exclude_host()
711 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_lbr_enable_all() local
713 if (cpuc->lbr_users && !vlbr_exclude_host()) in intel_pmu_lbr_enable_all()
719 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_lbr_disable_all() local
721 if (cpuc->lbr_users && !vlbr_exclude_host()) { in intel_pmu_lbr_disable_all()
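
vlbr_exclude_host() gates both enable_all and disable_all: when a guest owns the virtual LBR event (its bit is set in intel_ctrl_guest_mask), the host leaves the LBR MSRs untouched. A model of that gate; the bit index and names are assumptions standing in for INTEL_PMC_IDX_FIXED_VLBR and test_bit():

    /* Model of the vlbr_exclude_host() gate used by
     * intel_pmu_lbr_enable_all()/intel_pmu_lbr_disable_all(). */
    #include <assert.h>
    #include <stdbool.h>

    #define VLBR_BIT 58   /* assumed stand-in for INTEL_PMC_IDX_FIXED_VLBR */

    struct cpu_state { unsigned long long guest_mask; int lbr_users; };

    static bool vlbr_exclude_host(const struct cpu_state *cpuc)
    {
        return cpuc->guest_mask & (1ull << VLBR_BIT);
    }

    static bool should_touch_lbr(const struct cpu_state *cpuc)
    {
        return cpuc->lbr_users && !vlbr_exclude_host(cpuc);
    }

    int main(void)
    {
        struct cpu_state host_only = { .lbr_users = 1 };
        struct cpu_state guest_own = { .guest_mask = 1ull << VLBR_BIT,
                                       .lbr_users = 1 };

        assert(should_touch_lbr(&host_only));
        assert(!should_touch_lbr(&guest_own));
        return 0;
    }
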
729 void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) in intel_pmu_lbr_read_32() argument
732 struct perf_branch_entry *br = cpuc->lbr_entries; in intel_pmu_lbr_read_32()
754 cpuc->lbr_stack.nr = i; in intel_pmu_lbr_read_32()
755 cpuc->lbr_stack.hw_idx = tos; in intel_pmu_lbr_read_32()
763 void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) in intel_pmu_lbr_read_64() argument
767 struct perf_branch_entry *br = cpuc->lbr_entries; in intel_pmu_lbr_read_64()
773 if (cpuc->lbr_sel) { in intel_pmu_lbr_read_64()
774 need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO); in intel_pmu_lbr_read_64()
775 if (cpuc->lbr_sel->config & LBR_CALL_STACK) in intel_pmu_lbr_read_64()
849 cpuc->lbr_stack.nr = out; in intel_pmu_lbr_read_64()
850 cpuc->lbr_stack.hw_idx = tos; in intel_pmu_lbr_read_64()
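
intel_pmu_lbr_read_64() walks the LBR stack as a power-of-two ring: TOS names the newest entry, and (tos - i) & mask steps backwards with wraparound. A self-contained sketch of that indexing with made-up values (the real loop reads MSR pairs per slot):

    /* Model of the ring walk in intel_pmu_lbr_read_64(): entries are
     * read newest-first starting at the top-of-stack index. */
    #include <stdio.h>

    #define LBR_NR 8                        /* must be a power of two */

    int main(void)
    {
        unsigned long from[LBR_NR] = { 10, 11, 12, 13, 14, 15, 16, 17 };
        unsigned long tos = 2;              /* hardware top-of-stack index */
        unsigned long mask = LBR_NR - 1;

        for (unsigned long i = 0; i < LBR_NR; i++) {
            unsigned long idx = (tos - i) & mask;   /* wraps below zero */
            printf("entry %lu: from=%lu (slot %lu)\n", i, from[idx], idx);
        }
        return 0;
    }
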
891 static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc, in intel_pmu_store_lbr() argument
901 e = &cpuc->lbr_entries[i]; in intel_pmu_store_lbr()
933 cpuc->lbr_stack.nr = i; in intel_pmu_store_lbr()
940 static void intel_pmu_lbr_counters_reorder(struct cpu_hw_events *cpuc, in intel_pmu_lbr_counters_reorder() argument
959 for (i = 0; i < cpuc->lbr_stack.nr; i++) { in intel_pmu_lbr_counters_reorder()
960 src = cpuc->lbr_entries[i].reserved; in intel_pmu_lbr_counters_reorder()
966 cpuc->lbr_counters[i] = dst; in intel_pmu_lbr_counters_reorder()
967 cpuc->lbr_entries[i].reserved = 0; in intel_pmu_lbr_counters_reorder()
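
intel_pmu_lbr_counters_reorder() repacks the per-branch counter values that hardware logs in each entry's reserved field, moving them from hardware-counter order into the order the sampling event expects, then clears reserved. A heavily hedged sketch; the 2-bit field width and the order mapping are assumptions, not confirmed by this listing:

    /* Hedged model of the repacking done per entry in
     * intel_pmu_lbr_counters_reorder(). */
    #include <stdint.h>
    #include <stdio.h>

    #define CNTR_BITS 2                     /* assumed field width */
    #define CNTR_MASK ((1u << CNTR_BITS) - 1)

    static uint64_t reorder(uint64_t src, const int *order, int n)
    {
        uint64_t dst = 0;

        for (int j = 0; j < n; j++) {
            uint64_t cnt = (src >> (order[j] * CNTR_BITS)) & CNTR_MASK;
            dst |= cnt << (j * CNTR_BITS);
        }
        return dst;
    }

    int main(void)
    {
        int order[] = { 2, 0, 1 };          /* event slot -> hw counter */
        uint64_t src = 0x24;                /* hw counters 2,1,0 = 10,01,00 */

        printf("%#llx\n", (unsigned long long)reorder(src, order, 3));
        return 0;
    }
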
972 struct cpu_hw_events *cpuc, in intel_pmu_lbr_save_brstack() argument
976 intel_pmu_lbr_counters_reorder(cpuc, event); in intel_pmu_lbr_save_brstack()
977 perf_sample_save_brstack(data, event, &cpuc->lbr_stack, cpuc->lbr_counters); in intel_pmu_lbr_save_brstack()
981 perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); in intel_pmu_lbr_save_brstack()
984 static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc) in intel_pmu_arch_lbr_read() argument
986 intel_pmu_store_lbr(cpuc, NULL); in intel_pmu_arch_lbr_read()
989 static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc) in intel_pmu_arch_lbr_read_xsave() argument
991 struct x86_perf_task_context_arch_lbr_xsave *xsave = cpuc->lbr_xsave; in intel_pmu_arch_lbr_read_xsave()
994 intel_pmu_store_lbr(cpuc, NULL); in intel_pmu_arch_lbr_read_xsave()
999 intel_pmu_store_lbr(cpuc, xsave->lbr.entries); in intel_pmu_arch_lbr_read_xsave()
1004 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_lbr_read() local
1012 if (!cpuc->lbr_users || vlbr_exclude_host() || in intel_pmu_lbr_read()
1013 cpuc->lbr_users == cpuc->lbr_pebs_users) in intel_pmu_lbr_read()
1016 x86_pmu.lbr_read(cpuc); in intel_pmu_lbr_read()
1018 intel_pmu_lbr_filter(cpuc); in intel_pmu_lbr_read()
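
intel_pmu_lbr_read() bails out early when a read would be useless: no LBR users, host excluded in favour of a guest vLBR event, or every user is a large-PEBS user whose records already carry the LBR data. A model of that gate (illustrative names):

    /* Model of the early-out in intel_pmu_lbr_read(). */
    #include <assert.h>
    #include <stdbool.h>

    struct cpu_state { int lbr_users, lbr_pebs_users; bool vlbr_guest; };

    static bool lbr_read_needed(const struct cpu_state *cpuc)
    {
        if (!cpuc->lbr_users || cpuc->vlbr_guest ||
            cpuc->lbr_users == cpuc->lbr_pebs_users)
            return false;
        return true;
    }

    int main(void)
    {
        struct cpu_state all_pebs = { .lbr_users = 2, .lbr_pebs_users = 2 };
        struct cpu_state mixed    = { .lbr_users = 2, .lbr_pebs_users = 1 };

        assert(!lbr_read_needed(&all_pebs));
        assert(lbr_read_needed(&mixed));
        return 0;
    }
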
1201 intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) in intel_pmu_lbr_filter() argument
1204 int br_sel = cpuc->br_sel; in intel_pmu_lbr_filter()
1213 for (i = 0; i < cpuc->lbr_stack.nr; i++) { in intel_pmu_lbr_filter()
1215 from = cpuc->lbr_entries[i].from; in intel_pmu_lbr_filter()
1216 to = cpuc->lbr_entries[i].to; in intel_pmu_lbr_filter()
1217 type = cpuc->lbr_entries[i].type; in intel_pmu_lbr_filter()
1229 type = branch_type(from, to, cpuc->lbr_entries[i].abort); in intel_pmu_lbr_filter()
1231 if (cpuc->lbr_entries[i].in_tx) in intel_pmu_lbr_filter()
1239 cpuc->lbr_entries[i].from = 0; in intel_pmu_lbr_filter()
1244 cpuc->lbr_entries[i].type = common_branch_type(type); in intel_pmu_lbr_filter()
1251 for (i = 0; i < cpuc->lbr_stack.nr; ) { in intel_pmu_lbr_filter()
1252 if (!cpuc->lbr_entries[i].from) { in intel_pmu_lbr_filter()
1254 while (++j < cpuc->lbr_stack.nr) { in intel_pmu_lbr_filter()
1255 cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j]; in intel_pmu_lbr_filter()
1256 cpuc->lbr_counters[j-1] = cpuc->lbr_counters[j]; in intel_pmu_lbr_filter()
1258 cpuc->lbr_stack.nr--; in intel_pmu_lbr_filter()
1259 if (!cpuc->lbr_entries[i].from) in intel_pmu_lbr_filter()
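
The tail of intel_pmu_lbr_filter() is an in-place compaction: entries rejected by the branch filter had .from zeroed earlier, and the loop shifts survivors down, re-checking the same slot after each shift so runs of rejected entries all disappear (the kernel shifts lbr_counters in lockstep, omitted here). A self-contained sketch:

    /* Model of the compaction pass at the end of intel_pmu_lbr_filter(). */
    #include <stdio.h>

    struct entry { unsigned long from, to; };

    static int compact(struct entry *e, int nr)
    {
        for (int i = 0; i < nr; ) {
            if (!e[i].from) {
                for (int j = i + 1; j < nr; j++)
                    e[j - 1] = e[j];       /* shift tail down one slot */
                nr--;
                continue;                  /* slot i now holds a new entry */
            }
            i++;
        }
        return nr;
    }

    int main(void)
    {
        struct entry e[] = { {1, 2}, {0, 0}, {0, 0}, {3, 4} };
        int nr = compact(e, 4);

        for (int i = 0; i < nr; i++)
            printf("%lu -> %lu\n", e[i].from, e[i].to);   /* 1->2, 3->4 */
        return 0;
    }
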
1268 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_store_pebs_lbrs() local
1272 (cpuc->n_pebs == cpuc->n_large_pebs)) in intel_pmu_store_pebs_lbrs()
1273 cpuc->lbr_stack.hw_idx = -1ULL; in intel_pmu_store_pebs_lbrs()
1275 cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos(); in intel_pmu_store_pebs_lbrs()
1277 intel_pmu_store_lbr(cpuc, lbr); in intel_pmu_store_pebs_lbrs()
1278 intel_pmu_lbr_filter(cpuc); in intel_pmu_store_pebs_lbrs()