/linux/arch/alpha/kernel/

perf_event.c
  391  static void maybe_change_configuration(struct cpu_hw_events *cpuc)  in maybe_change_configuration() argument
  395  if (cpuc->n_added == 0)  in maybe_change_configuration()
  399  for (j = 0; j < cpuc->n_events; j++) {  in maybe_change_configuration()
  400  struct perf_event *pe = cpuc->event[j];  in maybe_change_configuration()
  402  if (cpuc->current_idx[j] != PMC_NO_INDEX &&  in maybe_change_configuration()
  403  cpuc->current_idx[j] != pe->hw.idx) {  in maybe_change_configuration()
  404  alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);  in maybe_change_configuration()
  405  cpuc->current_idx[j] = PMC_NO_INDEX;  in maybe_change_configuration()
  410  cpuc->idx_mask = 0;  in maybe_change_configuration()
  411  for (j = 0; j < cpuc->n_events; j++) {  in maybe_change_configuration()
  [all …]
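The alpha hits above show a reschedule idiom that recurs across these drivers: if an event is still live on a hardware counter the new schedule no longer assigns to it, its count is folded in before the index is invalidated. A minimal sketch of that check under assumptions; the struct layout, array bound, and pmu_event_update() helper are hypothetical stand-ins, not the alpha code:

    /* Sketch only -- simplified bookkeeping, not the alpha implementation. */
    #define PMC_NO_INDEX		-1
    #define SKETCH_MAX_EVENTS	8

    struct sketch_cpu_hw_events {
    	int n_added;				/* events added since last hw write-out */
    	int n_events;				/* currently scheduled events */
    	struct perf_event *event[SKETCH_MAX_EVENTS];
    	int current_idx[SKETCH_MAX_EVENTS];	/* counter each event sits on now */
    };

    static void sketch_change_configuration(struct sketch_cpu_hw_events *cpuc)
    {
    	int j;

    	if (cpuc->n_added == 0)
    		return;		/* nothing changed; keep the current layout */

    	for (j = 0; j < cpuc->n_events; j++) {
    		struct perf_event *pe = cpuc->event[j];

    		/*
    		 * Event is live on a counter it no longer owns: bank its
    		 * count, then mark it as needing a fresh counter.
    		 */
    		if (cpuc->current_idx[j] != PMC_NO_INDEX &&
    		    cpuc->current_idx[j] != pe->hw.idx) {
    			pmu_event_update(pe, cpuc->current_idx[j]);	/* hypothetical helper */
    			cpuc->current_idx[j] = PMC_NO_INDEX;
    		}
    	}
    }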
/linux/arch/x86/events/intel/

lbr.c
  106  static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
  123  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in __intel_pmu_lbr_enable() local
  137  if (cpuc->lbr_sel)  in __intel_pmu_lbr_enable()
  138  lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;  in __intel_pmu_lbr_enable()
  139  if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)  in __intel_pmu_lbr_enable()
  192  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in intel_pmu_lbr_reset() local
  199  cpuc->last_task_ctx = NULL;  in intel_pmu_lbr_reset()
  200  cpuc->last_log_id = 0;  in intel_pmu_lbr_reset()
  201  if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && cpuc->lbr_select)  in intel_pmu_lbr_reset()
  362  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in intel_pmu_lbr_restore() local
  [all …]
ds.c
  850  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in intel_pmu_disable_bts() local
  853  if (!cpuc->ds)  in intel_pmu_disable_bts()
  867  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in intel_pmu_drain_bts_buffer() local
  868  struct debug_store *ds = cpuc->ds;  in intel_pmu_drain_bts_buffer()
  874  struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];  in intel_pmu_drain_bts_buffer()
  1256  static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)  in pebs_needs_sched_cb() argument
  1258  if (cpuc->n_pebs == cpuc->n_pebs_via_pt)  in pebs_needs_sched_cb()
  1261  return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);  in pebs_needs_sched_cb()
  1266  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in intel_pmu_pebs_sched_task() local
  1268  if (!sched_in && pebs_needs_sched_cb(cpuc))  in intel_pmu_pebs_sched_task()
  [all …]
core.c
  2282  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in __intel_pmu_disable_all() local
  2286  if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))  in __intel_pmu_disable_all()
  2299  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in __intel_pmu_enable_all() local
  2300  u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);  in __intel_pmu_enable_all()
  2304  if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {  in __intel_pmu_enable_all()
  2305  wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);  in __intel_pmu_enable_all()
  2306  cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;  in __intel_pmu_enable_all()
  2310  intel_ctrl & ~cpuc->intel_ctrl_guest_mask);  in __intel_pmu_enable_all()
  2312  if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {  in __intel_pmu_enable_all()
  2314  cpuc->events[INTEL_PMC_IDX_FIXED_BTS];  in __intel_pmu_enable_all()
  [all …]
bts.c
  262  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in bts_event_start() local
  273  bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;  in bts_event_start()
  274  bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;  in bts_event_start()
  275  bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;  in bts_event_start()
  307  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in bts_event_stop() local
  332  cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;  in bts_event_stop()
  333  cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;  in bts_event_stop()
  334  cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;  in bts_event_stop()
  335  cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;  in bts_event_stop()
  522  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in bts_event_add() local
  [all …]
knc.c
  216  struct cpu_hw_events *cpuc;  in knc_pmu_handle_irq() local
  221  cpuc = this_cpu_ptr(&cpu_hw_events);  in knc_pmu_handle_irq()
  243  struct perf_event *event = cpuc->events[bit];  in knc_pmu_handle_irq()
  247  if (!test_bit(bit, cpuc->active_mask))  in knc_pmu_handle_irq()
  268  if (cpuc->enabled)  in knc_pmu_handle_irq()
p4.c
  919  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in p4_pmu_disable_all() local
  923  struct perf_event *event = cpuc->events[idx];  in p4_pmu_disable_all()
  924  if (!test_bit(idx, cpuc->active_mask))  in p4_pmu_disable_all()
  998  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in p4_pmu_enable_all() local
  1002  struct perf_event *event = cpuc->events[idx];  in p4_pmu_enable_all()
  1003  if (!test_bit(idx, cpuc->active_mask))  in p4_pmu_enable_all()
  1035  struct cpu_hw_events *cpuc;  in p4_pmu_handle_irq() local
  1041  cpuc = this_cpu_ptr(&cpu_hw_events);  in p4_pmu_handle_irq()
  1046  if (!test_bit(idx, cpuc->active_mask)) {  in p4_pmu_handle_irq()
  1053  event = cpuc->events[idx];  in p4_pmu_handle_irq()
  [all …]
/linux/arch/x86/events/amd/

lbr.c
  99  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in amd_pmu_lbr_filter() local
  100  int br_sel = cpuc->br_sel, offset, type, i, j;  in amd_pmu_lbr_filter()
  110  for (i = 0; i < cpuc->lbr_stack.nr; i++) {  in amd_pmu_lbr_filter()
  111  from = cpuc->lbr_entries[i].from;  in amd_pmu_lbr_filter()
  112  to = cpuc->lbr_entries[i].to;  in amd_pmu_lbr_filter()
  121  cpuc->lbr_entries[i].from += offset;  in amd_pmu_lbr_filter()
  128  cpuc->lbr_entries[i].from = 0; /* mark invalid */  in amd_pmu_lbr_filter()
  133  cpuc->lbr_entries[i].type = common_branch_type(type);  in amd_pmu_lbr_filter()
  140  for (i = 0; i < cpuc->lbr_stack.nr; ) {  in amd_pmu_lbr_filter()
  141  if (!cpuc->lbr_entries[i].from) {  in amd_pmu_lbr_filter()
  [all …]
brs.c
  205  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in amd_brs_enable() local
  209  if (++cpuc->brs_active > 1)  in amd_brs_enable()
  221  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in amd_brs_enable_all() local
  222  if (cpuc->lbr_users)  in amd_brs_enable_all()
  228  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in amd_brs_disable() local
  232  if (!cpuc->brs_active)  in amd_brs_disable()
  236  if (--cpuc->brs_active)  in amd_brs_disable()
  257  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in amd_brs_disable_all() local
  258  if (cpuc->lbr_users)  in amd_brs_disable_all()
  283  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in amd_brs_drain() local
  [all …]
/linux/arch/x86/events/

core.c
  686  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in x86_pmu_disable_all() local
  690  struct hw_perf_event *hwc = &cpuc->events[idx]->hw;  in x86_pmu_disable_all()
  693  if (!test_bit(idx, cpuc->active_mask))  in x86_pmu_disable_all()
  726  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in x86_pmu_disable() local
  731  if (!cpuc->enabled)  in x86_pmu_disable()
  734  cpuc->n_added = 0;  in x86_pmu_disable()
  735  cpuc->enabled = 0;  in x86_pmu_disable()
  743  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in x86_pmu_enable_all() local
  747  struct hw_perf_event *hwc = &cpuc->events[idx]->hw;  in x86_pmu_enable_all()
  749  if (!test_bit(idx, cpuc->active_mask))  in x86_pmu_enable_all()
  [all …]
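The generic x86 core shows the disable/enable skeleton most handlers in this listing lean on: fetch this CPU's cpu_hw_events, walk the counter slots, and skip any slot whose bit is clear in active_mask. A hedged sketch of the disable side; disable_counter() and the num_counters bound are placeholders, not the x86_pmu interface:

    /* Sketch of the disable-all idiom, not the actual x86_pmu_disable_all(). */
    static void sketch_pmu_disable_all(int num_counters)
    {
    	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    	int idx;

    	for (idx = 0; idx < num_counters; idx++) {
    		if (!test_bit(idx, cpuc->active_mask))
    			continue;	/* slot carries no live event */
    		disable_counter(idx);	/* placeholder: clears the enable bit in HW */
    	}
    }

The active_mask test is what lets the loop run over every slot unconditionally while only touching counters that carry a live event.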
/linux/arch/sparc/kernel/

perf_event.c
  827  static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, in…  in sparc_pmu_enable_event() argument
  835  enc = perf_event_get_enc(cpuc->events[idx]);  in sparc_pmu_enable_event()
  837  val = cpuc->pcr[pcr_index];  in sparc_pmu_enable_event()
  840  cpuc->pcr[pcr_index] = val;  in sparc_pmu_enable_event()
  842  pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);  in sparc_pmu_enable_event()
  845  static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, i…  in sparc_pmu_disable_event() argument
  855  val = cpuc->pcr[pcr_index];  in sparc_pmu_disable_event()
  858  cpuc->pcr[pcr_index] = val;  in sparc_pmu_disable_event()
  860  pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);  in sparc_pmu_disable_event()
  923  static void read_in_all_counters(struct cpu_hw_events *cpuc)  in read_in_all_counters() argument
  [all …]
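sparc routes every event enable/disable through a software copy of the performance control register (cpuc->pcr[]): the cached value is masked, updated, stored back, and only then written to hardware, so no register read is needed on the hot path. A minimal sketch of that read-modify-write caching; mask, enc, and write_pcr() stand in for the sparc encoding and pcr_ops:

    /* Sketch: per-PMC control register cache with a single HW write-back. */
    static void sketch_pmu_enable_event(struct cpu_hw_events *cpuc, int pcr_index,
    				    u64 mask, u64 enc)
    {
    	u64 val = cpuc->pcr[pcr_index];	/* software copy, no HW read */

    	val &= ~mask;			/* clear this event's field ... */
    	val |= enc;			/* ... and install its encoding */
    	cpuc->pcr[pcr_index] = val;

    	write_pcr(pcr_index, val);	/* placeholder for pcr_ops->write_pcr() */
    }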
/linux/tools/sched_ext/

scx_qmap.bpf.c
  368  struct cpu_ctx *cpuc;  in BPF_STRUCT_OPS() local
  394  if (!(cpuc = bpf_map_lookup_elem(&cpu_ctx_stor, &zero))) {  in BPF_STRUCT_OPS()
  401  if (!cpuc->dsp_cnt) {  in BPF_STRUCT_OPS()
  402  cpuc->dsp_idx = (cpuc->dsp_idx + 1) % 5;  in BPF_STRUCT_OPS()
  403  cpuc->dsp_cnt = 1 << cpuc->dsp_idx;  in BPF_STRUCT_OPS()
  406  fifo = bpf_map_lookup_elem(&queue_arr, &cpuc->dsp_idx);  in BPF_STRUCT_OPS()
  408  scx_bpf_error("failed to find ring %llu", cpuc->dsp_idx);  in BPF_STRUCT_OPS()
  438  cpuc->dsp_cnt--;  in BPF_STRUCT_OPS()
  445  if (!cpuc->dsp_cnt)  in BPF_STRUCT_OPS()
  449  cpuc->dsp_cnt = 0;  in BPF_STRUCT_OPS()
  [all …]
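Both sched_ext schedulers pull their per-CPU scratch state out of a BPF map and bail out if the lookup fails; the verifier insists on that NULL check before any dereference. A minimal libbpf-style sketch of the pattern (the map name, struct layout, and helper are illustrative, not the scx_qmap definitions):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct cpu_ctx {
    	__u64 dsp_idx;
    	__u64 dsp_cnt;
    };

    struct {
    	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    	__uint(max_entries, 1);
    	__type(key, __u32);
    	__type(value, struct cpu_ctx);
    } cpu_ctx_map SEC(".maps");

    /* Per-CPU array: looking up index 0 resolves to the current CPU's slot. */
    static struct cpu_ctx *lookup_cpu_ctx(void)
    {
    	__u32 zero = 0;

    	return bpf_map_lookup_elem(&cpu_ctx_map, &zero);
    }

Callers must still test the returned pointer against NULL; a program that dereferences it unchecked is rejected at load time.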
scx_flatcg.bpf.c
  157  struct fcg_cpu_ctx *cpuc;  in find_cpu_ctx() local
  160  cpuc = bpf_map_lookup_elem(&cpu_ctx, &idx);  in find_cpu_ctx()
  161  if (!cpuc) {  in find_cpu_ctx()
  165  return cpuc;  in find_cpu_ctx()
  734  struct fcg_cpu_ctx *cpuc;  in BPF_STRUCT_OPS() local
  740  cpuc = find_cpu_ctx();  in BPF_STRUCT_OPS()
  741  if (!cpuc)  in BPF_STRUCT_OPS()
  744  if (!cpuc->cur_cgid)  in BPF_STRUCT_OPS()
  747  if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {  in BPF_STRUCT_OPS()
  748  if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) {  in BPF_STRUCT_OPS()
  [all …]
/linux/arch/loongarch/kernel/

perf_event.c
  253  static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)  in loongarch_pmu_alloc_counter() argument
  258  if (!test_and_set_bit(i, cpuc->used_mask))  in loongarch_pmu_alloc_counter()
  269  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in loongarch_pmu_enable_event() local
  274  cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base) |  in loongarch_pmu_enable_event()
  288  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in loongarch_pmu_disable_event() local
  293  cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &  in loongarch_pmu_disable_event()
  295  loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);  in loongarch_pmu_disable_event()
  388  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in loongarch_pmu_add() local
  394  idx = loongarch_pmu_alloc_counter(cpuc, hwc);  in loongarch_pmu_add()
  406  cpuc->events[idx] = event;  in loongarch_pmu_add()
  [all …]
/linux/arch/sh/kernel/

perf_event.c
  201  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in sh_pmu_stop() local
  207  cpuc->events[idx] = NULL;  in sh_pmu_stop()
  219  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in sh_pmu_start() local
  229  cpuc->events[idx] = event;  in sh_pmu_start()
  236  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in sh_pmu_del() local
  239  __clear_bit(event->hw.idx, cpuc->used_mask);  in sh_pmu_del()
  246  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in sh_pmu_add() local
  253  if (__test_and_set_bit(idx, cpuc->used_mask)) {  in sh_pmu_add()
  254  idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);  in sh_pmu_add()
  258  __set_bit(idx, cpuc->used_mask);  in sh_pmu_add()
/linux/drivers/perf/

arm_xscale_pmu.c
  149  struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);  in xscale1pmu_handle_irq() local
  174  struct perf_event *event = cpuc->events[idx];  in xscale1pmu_handle_irq()
  267  xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,  in xscale1pmu_get_event_idx() argument
  272  if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))  in xscale1pmu_get_event_idx()
  277  if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))  in xscale1pmu_get_event_idx()
  280  if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))  in xscale1pmu_get_event_idx()
  287  static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,  in xscalepmu_clear_event_idx() argument
  290  clear_bit(event->hw.idx, cpuc->used_mask);  in xscalepmu_clear_event_idx()
  488  struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);  in xscale2pmu_handle_irq() local
  507  struct perf_event *event = cpuc->events[idx];  in xscale2pmu_handle_irq()
  [all …]
riscv_pmu.c
  260  struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);  in riscv_pmu_add() local
  269  cpuc->events[idx] = event;  in riscv_pmu_add()
  270  cpuc->n_events++;  in riscv_pmu_add()
  284  struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);  in riscv_pmu_del() local
  288  cpuc->events[hwc->idx] = NULL;  in riscv_pmu_del()
  292  cpuc->n_events--;  in riscv_pmu_del()
  390  struct cpu_hw_events *cpuc;  in riscv_pmu_alloc() local
  403  cpuc = per_cpu_ptr(pmu->hw_events, cpuid);  in riscv_pmu_alloc()
  404  cpuc->n_events = 0;  in riscv_pmu_alloc()
  406  cpuc->events[i] = NULL;  in riscv_pmu_alloc()
  [all …]
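riscv_pmu_add()/riscv_pmu_del() show the symmetric bookkeeping nearly every driver here repeats: publish the event in cpuc->events[] and bump n_events on add, then null the slot and decrement on delete, so the IRQ handler can map a counter index back to its event. A hedged sketch of that pairing; get_ctr_idx() and stop_event() are stand-ins for the driver-specific steps:

    /* Sketch of the add/del bookkeeping pair; helpers are placeholders. */
    static int sketch_pmu_add(struct cpu_hw_events *cpuc, struct perf_event *event)
    {
    	int idx = get_ctr_idx(event);	/* placeholder: claim a free counter */

    	if (idx < 0)
    		return idx;

    	event->hw.idx = idx;
    	cpuc->events[idx] = event;	/* IRQ handler resolves idx -> event here */
    	cpuc->n_events++;
    	return 0;
    }

    static void sketch_pmu_del(struct cpu_hw_events *cpuc, struct perf_event *event)
    {
    	stop_event(event);		/* placeholder: quiesce the counter first */
    	cpuc->events[event->hw.idx] = NULL;
    	cpuc->n_events--;
    }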
arm_v6_pmu.c
  242  struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);  in armv6pmu_handle_irq() local
  259  struct perf_event *event = cpuc->events[idx];  in armv6pmu_handle_irq()
  314  armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,  in armv6pmu_get_event_idx() argument
  320  if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))  in armv6pmu_get_event_idx()
  329  if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))  in armv6pmu_get_event_idx()
  332  if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))  in armv6pmu_get_event_idx()
  340  static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,  in armv6pmu_clear_event_idx() argument
  343  clear_bit(event->hw.idx, cpuc->used_mask);  in armv6pmu_clear_event_idx()
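The XScale and ARMv6 allocators use the same claim protocol: the cycle counter is reserved for cycle events, and everything else competes for a general-purpose slot via test_and_set_bit() on used_mask, which makes each claim atomic. A minimal sketch; the counter indices and the is_cycles flag are illustrative, not the driver's constants:

    /* Sketch of the used_mask claim protocol (indices are illustrative). */
    enum { CYCLE_COUNTER = 0, COUNTER0 = 1, COUNTER1 = 2 };

    static int sketch_get_event_idx(struct pmu_hw_events *cpuc, bool is_cycles)
    {
    	if (is_cycles) {
    		/* Cycle events only ever run on the dedicated counter. */
    		if (test_and_set_bit(CYCLE_COUNTER, cpuc->used_mask))
    			return -EAGAIN;		/* already claimed */
    		return CYCLE_COUNTER;
    	}

    	/* Atomically claim the first free general-purpose counter. */
    	if (!test_and_set_bit(COUNTER1, cpuc->used_mask))
    		return COUNTER1;
    	if (!test_and_set_bit(COUNTER0, cpuc->used_mask))
    		return COUNTER0;

    	return -EAGAIN;				/* all counters busy */
    }

The matching release side is the one-liner visible in the hits above: clear_bit(event->hw.idx, cpuc->used_mask) hands the slot back.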
arm_pmuv3.c
  771  struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);  in armv8pmu_enable_user_access() local
  775  for_each_set_bit(i, cpuc->used_mask, ARMPMU_MAX_HWEVENTS) {  in armv8pmu_enable_user_access()
  776  if (armv8pmu_event_has_user_read(cpuc->events[i]))  in armv8pmu_enable_user_access()
  782  for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,  in armv8pmu_enable_user_access()
  844  struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);  in armv8pmu_handle_irq() local
  870  struct perf_event *event = cpuc->events[idx];  in armv8pmu_handle_irq()
  903  static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,  in armv8pmu_get_single_idx() argument
  909  if (!test_and_set_bit(idx, cpuc->used_mask))  in armv8pmu_get_single_idx()
  915  static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,  in armv8pmu_get_chain_idx() argument
  927  if (!test_and_set_bit(idx, cpuc->used_mask)) {  in armv8pmu_get_chain_idx()
  [all …]
arm_v7_pmu.c
  920  struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);  in armv7pmu_handle_irq() local
  941  struct perf_event *event = cpuc->events[idx];  in armv7pmu_handle_irq()
  989  static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,  in armv7pmu_get_event_idx() argument
  999  if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))  in armv7pmu_get_event_idx()
  1010  if (!test_and_set_bit(idx, cpuc->used_mask))  in armv7pmu_get_event_idx()
  1018  static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,  in armv7pmu_clear_event_idx() argument
  1021  clear_bit(event->hw.idx, cpuc->used_mask);  in armv7pmu_clear_event_idx()
  1555  static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,  in krait_pmu_get_event_idx() argument
  1575  if (test_and_set_bit(bit, cpuc->used_mask))  in krait_pmu_get_event_idx()
  1579  idx = armv7pmu_get_event_idx(cpuc, event);  in krait_pmu_get_event_idx()
  [all …]
apple_m1_cpu_pmu.c
  416  struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);  in m1_pmu_handle_irq() local
  436  struct perf_event *event = cpuc->events[idx];  in m1_pmu_handle_irq()
  467  static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc,  in m1_pmu_get_event_idx() argument
  483  if (!test_and_set_bit(idx, cpuc->used_mask))  in m1_pmu_get_event_idx()
  490  static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc,  in m1_pmu_clear_event_idx() argument
  493  clear_bit(event->hw.idx, cpuc->used_mask);  in m1_pmu_clear_event_idx()
riscv_pmu_sbi.c
  406  struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);  in pmu_sbi_ctr_get_idx() local
  450  if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))  in pmu_sbi_ctr_get_idx()
  453  if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))  in pmu_sbi_ctr_get_idx()
  465  struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);  in pmu_sbi_ctr_clear_idx() local
  469  clear_bit(idx, cpuc->used_fw_ctrs);  in pmu_sbi_ctr_clear_idx()
  471  clear_bit(idx, cpuc->used_hw_ctrs);  in pmu_sbi_ctr_clear_idx()
  1154  struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);  in riscv_pm_pmu_notify() local
  1155  int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);  in riscv_pm_pmu_notify()
  1163  event = cpuc->events[idx];  in riscv_pm_pmu_notify()
/linux/arch/mips/kernel/

perf_event_mipsxx.c
  314  static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,  in mipsxx_pmu_alloc_counter() argument
  341  !test_and_set_bit(i, cpuc->used_mask))  in mipsxx_pmu_alloc_counter()
  351  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in mipsxx_pmu_enable_event() local
  357  cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) |  in mipsxx_pmu_enable_event()
  362  cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |  in mipsxx_pmu_enable_event()
  369  cpuc->saved_ctrl[idx] |=  in mipsxx_pmu_enable_event()
  374  cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;  in mipsxx_pmu_enable_event()
  387  cpuc->saved_ctrl[idx] |= ctrl;  in mipsxx_pmu_enable_event()
  397  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in mipsxx_pmu_disable_event() local
  403  cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &  in mipsxx_pmu_disable_event()
  [all …]
/linux/arch/x86/events/zhaoxin/

core.c
  357  struct cpu_hw_events *cpuc;  in zhaoxin_pmu_handle_irq() local
  362  cpuc = this_cpu_ptr(&cpu_hw_events);  in zhaoxin_pmu_handle_irq()
  387  struct perf_event *event = cpuc->events[bit];  in zhaoxin_pmu_handle_irq()
  391  if (!test_bit(bit, cpuc->active_mask))  in zhaoxin_pmu_handle_irq()
  422  zhaoxin_get_event_constraints(struct cpu_hw_events *cpuc, int idx,  in zhaoxin_get_event_constraints() argument
/linux/arch/csky/kernel/

perf_event.c
  1106  struct pmu_hw_events *cpuc = this_cpu_ptr(csky_pmu.hw_events);  in csky_pmu_handle_irq() local
  1124  struct perf_event *event = cpuc->events[idx];  in csky_pmu_handle_irq()