Searched refs:lbr_nr (Results 1 – 9 of 9) sorted by relevance

/linux/arch/x86/events/intel/
lbr.c
168 for (i = 0; i < x86_pmu.lbr_nr; i++) in intel_pmu_lbr_reset_32()
176 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_lbr_reset_64()
187 wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr); in intel_pmu_arch_lbr_reset()
194 if (!x86_pmu.lbr_nr) in intel_pmu_lbr_reset()
369 mask = x86_pmu.lbr_nr - 1; in intel_pmu_lbr_restore()
375 for (; i < x86_pmu.lbr_nr; i++) { in intel_pmu_lbr_restore()
396 if (!entries[x86_pmu.lbr_nr - 1].from) in intel_pmu_arch_lbr_restore()
399 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_arch_lbr_restore()
461 mask = x86_pmu.lbr_nr - 1; in intel_pmu_lbr_save()
463 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_lbr_save()
[all …]
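A note on the pattern above: lbr_nr is the LBR stack depth, and on these parts the depth is a power of two, so the save/restore paths treat the stack as a ring buffer masked with lbr_nr - 1. A minimal standalone sketch of that walk; LBR_DEPTH, lbr_tos, and entries[] are hypothetical stand-ins for the x86_pmu fields and the MSR reads:

    #include <stdio.h>

    struct lbr_entry { unsigned long from, to; };

    /* Hypothetical stand-ins for x86_pmu.lbr_nr, the TOS register, and
     * the MSR-backed entries; the real code reads these via rdmsrl(). */
    #define LBR_DEPTH 32
    static struct lbr_entry entries[LBR_DEPTH];
    static unsigned int lbr_tos = 5;

    int main(void)
    {
        unsigned int mask = LBR_DEPTH - 1;   /* depth is a power of two */
        unsigned int i;

        /* Walk from the newest slot backwards, wrapping with the mask,
         * in the spirit of the save/restore loops listed above. */
        for (i = 0; i < LBR_DEPTH; i++) {
            unsigned int idx = (lbr_tos - i) & mask;
            printf("slot %2u: %#lx -> %#lx\n", i,
                   entries[idx].from, entries[idx].to);
        }
        return 0;
    }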
core.c
2336 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr); in __intel_pmu_snapshot_branch_stack()
2969 if (x86_pmu.lbr_nr) { in intel_pmu_reset()
5419 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr; in is_lbr_from()
5760 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); in branches_show()
5795 return x86_pmu.lbr_nr ? attr->mode : 0; in lbr_is_visible()
7186 x86_pmu.lbr_nr = 0; in intel_pmu_init()
7187 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_init()
7190 x86_pmu.lbr_nr = 0; in intel_pmu_init()
7193 if (x86_pmu.lbr_nr) { in intel_pmu_init()
7196 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); in intel_pmu_init()
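Alongside the sysfs plumbing (branches_show(), lbr_is_visible()) and the init-time probe that zeroes lbr_nr on failure, the hit at line 5419 shows lbr_nr doubling as an MSR range length: the FROM registers are contiguous starting at lbr_from, so membership is a bounds check. A sketch of that check with illustrative base/depth values:

    #include <stdio.h>

    /* Is MSR number `msr` one of the contiguous LBR FROM registers?
     * Mirrors the is_lbr_from() check; base and depth are illustrative. */
    static int is_lbr_from(unsigned long msr, unsigned long lbr_from,
                           unsigned int lbr_nr)
    {
        unsigned long lbr_from_nr = lbr_from + lbr_nr;

        return msr >= lbr_from && msr < lbr_from_nr;
    }

    int main(void)
    {
        printf("%d %d\n",
               is_lbr_from(0x68A, 0x680, 32),   /* 1: inside the range */
               is_lbr_from(0x6C0, 0x680, 32));  /* 0: outside the range */
        return 0;
    }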
ds.c
1289 sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry); in adaptive_pebs_record_size_update()
1340 ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT); in pebs_update_adaptive_cfg()
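The two ds.c hits size the adaptive PEBS record and program the data configuration from the same depth: each LBR entry adds sizeof(struct lbr_entry) bytes, and the config field encodes entries minus one. A standalone sketch of both computations; the shift value 24 matches the kernel's PEBS_DATACFG_LBR_SHIFT, and the depth is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    struct lbr_entry { uint64_t from, to, info; };

    #define PEBS_DATACFG_LBR_SHIFT 24

    int main(void)
    {
        unsigned int lbr_nr = 32;   /* illustrative stack depth */

        /* adaptive_pebs_record_size_update(): LBR entries grow the record */
        size_t sz = lbr_nr * sizeof(struct lbr_entry);

        /* pebs_update_adaptive_cfg(): the field holds entries - 1 */
        uint64_t datacfg = (uint64_t)(lbr_nr - 1) << PEBS_DATACFG_LBR_SHIFT;

        printf("LBR bytes per record: %zu, datacfg: %#llx\n",
               sz, (unsigned long long)datacfg);
        return 0;
    }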
/linux/arch/x86/events/amd/
lbr.c
170 for (i = 0; i < x86_pmu.lbr_nr; i++) { in amd_pmu_lbr_read()
253 if (!x86_pmu.lbr_nr) in amd_pmu_lbr_setup_filter()
325 if (!x86_pmu.lbr_nr) in amd_pmu_lbr_reset()
329 for (i = 0; i < x86_pmu.lbr_nr; i++) { in amd_pmu_lbr_reset()
344 if (!x86_pmu.lbr_nr) in amd_pmu_lbr_add()
363 if (!x86_pmu.lbr_nr) in amd_pmu_lbr_del()
392 if (!cpuc->lbr_users || !x86_pmu.lbr_nr) in amd_pmu_lbr_enable_all()
414 if (!cpuc->lbr_users || !x86_pmu.lbr_nr) in amd_pmu_lbr_disable_all()
429 x86_pmu.lbr_nr = ebx.split.lbr_v2_stack_sz; in amd_pmu_lbr_init()
431 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); in amd_pmu_lbr_init()
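On the AMD side, lbr_nr is set from the CPUID-reported LBR v2 stack size (line 429) and stays 0 otherwise, which is why nearly every entry point above opens with the same early-return guard. A sketch of that gate, with a hypothetical lbr_depth standing in for x86_pmu.lbr_nr:

    #include <stdio.h>

    static unsigned int lbr_depth;   /* 0 until detection succeeds */

    static void lbr_reset(void)
    {
        unsigned int i;

        if (!lbr_depth)      /* the common early-return guard */
            return;

        for (i = 0; i < lbr_depth; i++)
            printf("clearing LBR slot %u\n", i);
    }

    int main(void)
    {
        lbr_reset();         /* no-op: depth still 0 */
        lbr_depth = 16;      /* as if CPUID reported a 16-deep stack */
        lbr_reset();
        return 0;
    }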
brs.c
62 x86_pmu.lbr_nr = 16; in amd_brs_detect()
86 if (!x86_pmu.lbr_nr) in amd_brs_setup_filter()
148 if (event->attr.sample_period <= x86_pmu.lbr_nr) in amd_brs_hw_config()
170 return (cfg->msroff ? cfg->msroff : x86_pmu.lbr_nr) - 1; in amd_brs_get_tos()
198 pr_cont("%d-deep BRS, ", x86_pmu.lbr_nr); in amd_brs_init()
302 if (WARN_ON_ONCE(cfg.msroff >= x86_pmu.lbr_nr)) in amd_brs_drain()
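BRS hardwires a 16-deep stack (line 62), and amd_brs_get_tos() (line 170) converts the hardware's next-write offset into the index of the newest entry: one before the offset, wrapping to the last slot when the offset is 0. A standalone sketch; next_off and depth are illustrative names:

    #include <stdio.h>

    /* Index of the newest entry, given the hardware's next-write offset. */
    static unsigned int brs_get_tos(unsigned int next_off, unsigned int depth)
    {
        return (next_off ? next_off : depth) - 1;
    }

    int main(void)
    {
        unsigned int depth = 16;

        printf("%u %u %u\n",
               brs_get_tos(0, depth),   /* wraps to 15 */
               brs_get_tos(1, depth),   /* 0 */
               brs_get_tos(7, depth));  /* 6 */
        return 0;
    }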
core.c
408 if (has_branch_stack(event) && !x86_pmu.lbr_nr) in amd_pmu_hw_config()
559 if (x86_pmu.lbr_nr) in amd_pmu_cpu_reset()
934 cnt = min(cnt, x86_pmu.lbr_nr); in amd_pmu_v2_snapshot_branch_stack()
970 if (x86_pmu.lbr_nr) { in amd_pmu_v2_handle_irq()
1300 if (has_branch_stack(event) && *left > x86_pmu.lbr_nr) in amd_pmu_limit_period()
1301 *left -= x86_pmu.lbr_nr; in amd_pmu_limit_period()
1343 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); in branches_show()
1356 return x86_pmu.lbr_nr ? attr->mode : 0; in amd_branches_is_visible()
1378 return static_cpu_has(X86_FEATURE_BRS) && x86_pmu.lbr_nr ? in amd_brs_is_visible()
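amd_pmu_limit_period() (lines 1300-1301) trims the programmed sample period by the stack depth when branch sampling is active, to account for the branches the hardware keeps recording around the overflow. A sketch of that adjustment with illustrative types and names:

    #include <stdio.h>

    /* Trim a requested period by the branch-stack depth, in the manner
     * of amd_pmu_limit_period(); periods at or below the depth are left
     * alone so they cannot go non-positive. */
    static long long limit_period(long long left, unsigned int lbr_nr,
                                  int has_branch_stack)
    {
        if (has_branch_stack && left > lbr_nr)
            left -= lbr_nr;
        return left;
    }

    int main(void)
    {
        printf("%lld\n", limit_period(100000, 16, 1));  /* 99984 */
        printf("%lld\n", limit_period(10, 16, 1));      /* unchanged */
        return 0;
    }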
/linux/tools/perf/util/
machine.c
2305 int lbr_nr = lbr_stack->nr; in lbr_callchain_add_lbr_ip() (local)
2352 for (i = 0; i < lbr_nr; i++) { in lbr_callchain_add_lbr_ip()
2367 for (i = lbr_nr - 1; i >= 0; i--) { in lbr_callchain_add_lbr_ip()
2379 if (lbr_nr > 0) { in lbr_callchain_add_lbr_ip()
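The perf tool side consumes the same count: lbr_callchain_add_lbr_ip() copies lbr_nr captured branches into a callchain, walking the array forwards for one emission order and backwards for the other (lines 2352 and 2367). A reduced sketch of the two traversal directions over a hypothetical branch array:

    #include <stdio.h>

    struct branch { unsigned long from, to; };

    int main(void)
    {
        struct branch stack[] = { {0x10, 0x20}, {0x30, 0x40}, {0x50, 0x60} };
        int lbr_nr = 3, i;

        /* forward walk, as in the loop at line 2352 */
        for (i = 0; i < lbr_nr; i++)
            printf("forward:  %#lx\n", stack[i].to);

        /* reverse walk, as in the loop at line 2367 */
        for (i = lbr_nr - 1; i >= 0; i--)
            printf("backward: %#lx\n", stack[i].from);

        return 0;
    }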
/linux/arch/x86/events/
core.c
559 if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) in x86_pmu_max_precise()
1557 if (x86_pmu.lbr_nr) { in perf_event_print_debug()
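The common-core hit at line 559 gates the deepest precise_ip level on having either an LBR stack or PEBS record format 2 or newer, both of which let the kernel report the exact sampled IP. A reduced sketch of that laddering; the surrounding pebs_active/pebs_broken checks are collapsed into a single flag here:

    #include <stdio.h>

    static int max_precise(int pebs_ok, int lbr_nr, int pebs_format)
    {
        int precise = 0;

        if (pebs_ok) {
            precise++;               /* PEBS present */
            precise++;               /* constant skid supported */
            if (lbr_nr || pebs_format >= 2)
                precise++;           /* exact IP available */
        }
        return precise;
    }

    int main(void)
    {
        printf("%d\n", max_precise(1, 32, 1));  /* 3 */
        printf("%d\n", max_precise(1, 0, 1));   /* 2 */
        return 0;
    }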
perf_event.h
893 lbr_info, lbr_nr; /* LBR base regs and size */ (member)
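Finally, the declaration that all of the above reads: lbr_nr lives in struct x86_pmu alongside the LBR MSR base numbers. An indicative reconstruction of that member group; names follow the hit, but the exact surrounding layout in perf_event.h may differ:

    /* Indicative view of the LBR members of struct x86_pmu; see
     * arch/x86/events/perf_event.h around line 893. */
    struct x86_pmu_lbr_members {
        unsigned int lbr_tos;    /* top-of-stack MSR number */
        unsigned int lbr_from;   /* base of the FROM MSR range */
        unsigned int lbr_to;     /* base of the TO MSR range */
        unsigned int lbr_info;   /* base of the INFO MSR range */
        unsigned int lbr_nr;     /* stack depth: the identifier searched here */
    };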