Lines Matching +full:int +full:- +full:fwd +full:- +full:mask
1 // SPDX-License-Identifier: GPL-2.0
40 unsigned int ld_dse:4;
41 unsigned int ld_stlb_miss:1;
42 unsigned int ld_locked:1;
43 unsigned int ld_data_blk:1;
44 unsigned int ld_addr_blk:1;
45 unsigned int ld_reserved:24;
48 unsigned int st_l1d_hit:1;
49 unsigned int st_reserved1:3;
50 unsigned int st_stlb_miss:1;
51 unsigned int st_locked:1;
52 unsigned int st_reserved2:26;
55 unsigned int st_lat_dse:4;
56 unsigned int st_lat_stlb_miss:1;
57 unsigned int st_lat_locked:1;
58 unsigned int ld_reserved3:26;
61 unsigned int mtl_dse:5;
62 unsigned int mtl_locked:1;
63 unsigned int mtl_stlb_miss:1;
64 unsigned int mtl_fwd_blk:1;
65 unsigned int ld_reserved4:24;
68 unsigned int lnc_dse:8;
69 unsigned int ld_reserved5:2;
70 unsigned int lnc_stlb_miss:1;
71 unsigned int lnc_locked:1;
72 unsigned int lnc_data_blk:1;
73 unsigned int lnc_addr_blk:1;
74 unsigned int ld_reserved6:18;
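These overlapping bitfield layouts are per-generation views (load, store, store-latency, MTL, LNC) of the same raw 64-bit PEBS data-source word; in ds.c they sit inside union intel_x86_pebs_dse next to a plain u64 member, so a decoder assigns the raw value once and reads whichever view fits the running core. A minimal sketch of the pattern, where pebs_dse and src are stand-in locals and P() is the encoding helper defined further down in the file:

    union intel_x86_pebs_dse dse;
    u64 src = 0;

    dse.val = pebs_dse;             /* raw u64 pulled from the PEBS record */
    if (dse.ld_stlb_miss)           /* load view, bit 4 */
            src |= P(TLB, MISS) | P(TLB, L2);
    if (dse.ld_locked)              /* load view, bit 5 */
            src |= P(LOCK, LOCKED);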
124 data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD); in __intel_pmu_pebs_data_source_skl()
137 data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD); in __intel_pmu_pebs_data_source_grt()
160 data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD); in __intel_pmu_pebs_data_source_cmt()
164 data_source[0x0c] = OP_LH | LEVEL(RAM) | REM | P(SNOOPX, FWD); in __intel_pmu_pebs_data_source_cmt()
211 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD), /* 0x0c: L3 Hit Snoop Fwd */
215 OP_LH | LEVEL(MSC) | P(SNOOP, NONE), /* 0x10: Memory-side Cache Hit */
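The data_source[] entries above compose perf_mem_data_src encodings out of helper macros defined near the top of ds.c, which wrap the UAPI PERF_MEM_S() shift-and-or encoder; roughly, with l3_fwd as an illustrative local only:

    #define P(a, b)    PERF_MEM_S(a, b)  /* PERF_MEM_##a##_##b << PERF_MEM_##a##_SHIFT */
    #define OP_LH      (P(OP, LOAD) | P(LVL, HIT))
    #define LEVEL(x)   P(LVLNUM, x)
    #define REM        P(REMOTE, REMOTE)

    /* entry 0x08 above therefore reads: load, hit in L3, snoop forwarded */
    u64 l3_fwd = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD);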
275 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) in precise_datala_hsw()
277 else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW) in precise_datala_hsw()
288 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) { in precise_datala_hsw()
314 /* Retrieve the latency data for the e-core of ADL */
321 hybrid_pmu(event->pmu)->pmu_type == hybrid_big); in __grt_latency_data()
324 val = hybrid_var(event->pmu, pebs_data_source)[dse]; in __grt_latency_data()
347 /* Retrieve the latency data for the e-core of MTL */
368 val = hybrid_var(event->pmu, pebs_data_source)[status & PERF_PEBS_DATA_SOURCE_MASK]; in lnc_latency_data()
388 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) in lnc_latency_data()
396 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); in lnl_latency_data()
398 if (pmu->pmu_type == hybrid_small) in lnl_latency_data()
406 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); in arl_h_latency_data()
408 if (pmu->pmu_type == hybrid_tiny) in arl_h_latency_data()
422 * use the mapping table for bits 0-3 in load_latency_data()
424 val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse]; in load_latency_data()
472 * use the mapping table for bits 0-3 in store_latency_data()
474 val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse]; in store_latency_data()
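All of the *_latency_data() helpers in this stretch share one decode pattern: index the per-PMU pebs_data_source[] table (populated by the __intel_pmu_pebs_data_source_*() routines above) with the low dse bits, then fold in the TLB/lock information. A condensed sketch along the lines of load_latency_data(), assuming the file's pebs_set_tlb_lock() helper:

    union intel_x86_pebs_dse dse;
    u64 val;

    dse.val = status;                                       /* raw dse word */
    val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse];
    pebs_set_tlb_lock(&val, dse.ld_stlb_miss, dse.ld_locked);
    return val;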
550 void init_debug_store_on_cpu(int cpu) in init_debug_store_on_cpu()
562 void fini_debug_store_on_cpu(int cpu) in fini_debug_store_on_cpu()
585 * This is a cross-CPU update of the cpu_entry_area; we must shoot down in ds_update_cea()
605 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu) in dsalloc_pages()
607 unsigned int order = get_order(size); in dsalloc_pages()
608 int node = cpu_to_node(cpu); in dsalloc_pages()
621 static int alloc_pebs_buffer(int cpu) in alloc_pebs_buffer()
624 struct debug_store *ds = hwev->ds; in alloc_pebs_buffer()
626 int max, node = cpu_to_node(cpu); in alloc_pebs_buffer()
634 return -ENOMEM; in alloc_pebs_buffer()
637 hwev->pebs_vaddr = buffer; in alloc_pebs_buffer()
649 return -ENOMEM; in alloc_pebs_buffer()
653 hwev->pebs_vaddr = buffer; in alloc_pebs_buffer()
655 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; in alloc_pebs_buffer()
656 ds->pebs_buffer_base = (unsigned long) cea; in alloc_pebs_buffer()
658 ds->pebs_index = ds->pebs_buffer_base; in alloc_pebs_buffer()
660 ds->pebs_absolute_maximum = ds->pebs_buffer_base + max; in alloc_pebs_buffer()
664 static void release_pebs_buffer(int cpu) in release_pebs_buffer()
677 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; in release_pebs_buffer()
681 dsfree_pages(hwev->pebs_vaddr, x86_pmu.pebs_buffer_size); in release_pebs_buffer()
682 hwev->pebs_vaddr = NULL; in release_pebs_buffer()
685 static int alloc_bts_buffer(int cpu) in alloc_bts_buffer()
688 struct debug_store *ds = hwev->ds; in alloc_bts_buffer()
690 int max; in alloc_bts_buffer()
698 return -ENOMEM; in alloc_bts_buffer()
700 hwev->ds_bts_vaddr = buffer; in alloc_bts_buffer()
702 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; in alloc_bts_buffer()
703 ds->bts_buffer_base = (unsigned long) cea; in alloc_bts_buffer()
705 ds->bts_index = ds->bts_buffer_base; in alloc_bts_buffer()
707 ds->bts_absolute_maximum = ds->bts_buffer_base + in alloc_bts_buffer()
709 ds->bts_interrupt_threshold = ds->bts_absolute_maximum - in alloc_bts_buffer()
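The BTS interrupt threshold is parked a sixteenth of the buffer short of the absolute maximum, so the PMI fires while there is still room for records generated on the way into the handler. With the mainline constants (BTS_BUFFER_SIZE = 64KB, BTS_RECORD_SIZE = 24 bytes) the arithmetic works out to roughly:

    max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;                /* 65536 / 24 = 2730 */
    absolute_maximum    = base + max * BTS_RECORD_SIZE;     /* base + 65520 */
    interrupt_threshold = absolute_maximum
                          - (max / 16) * BTS_RECORD_SIZE;   /* base + 61440 */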
714 static void release_bts_buffer(int cpu) in release_bts_buffer()
723 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; in release_bts_buffer()
725 dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE); in release_bts_buffer()
726 hwev->ds_bts_vaddr = NULL; in release_bts_buffer()
729 static int alloc_ds_buffer(int cpu) in alloc_ds_buffer()
731 struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store; in alloc_ds_buffer()
738 static void release_ds_buffer(int cpu) in release_ds_buffer()
745 int cpu; in release_ds_buffers()
771 int bts_err = 0, pebs_err = 0; in reserve_ds_buffers()
772 int cpu; in reserve_ds_buffers()
835 inline int alloc_arch_pebs_buf_on_cpu(int cpu) in alloc_arch_pebs_buf_on_cpu()
843 inline void release_arch_pebs_buf_on_cpu(int cpu) in release_arch_pebs_buf_on_cpu()
851 void init_arch_pebs_on_cpu(int cpu) in init_arch_pebs_on_cpu()
859 if (!cpuc->pebs_vaddr) { in init_arch_pebs_on_cpu()
866 * 4KB-aligned pointer to the output buffer in init_arch_pebs_on_cpu()
871 arch_pebs_base = virt_to_phys(cpuc->pebs_vaddr) | PEBS_BUFFER_SHIFT; in init_arch_pebs_on_cpu()
877 inline void fini_arch_pebs_on_cpu(int cpu) in fini_arch_pebs_on_cpu()
917 if (!cpuc->ds) in intel_pmu_disable_bts()
929 int intel_pmu_drain_bts_buffer(void) in intel_pmu_drain_bts_buffer()
932 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_bts_buffer()
938 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; in intel_pmu_drain_bts_buffer()
952 base = (struct bts_record *)(unsigned long)ds->bts_buffer_base; in intel_pmu_drain_bts_buffer()
953 top = (struct bts_record *)(unsigned long)ds->bts_index; in intel_pmu_drain_bts_buffer()
960 ds->bts_index = ds->bts_buffer_base; in intel_pmu_drain_bts_buffer()
962 perf_sample_data_init(&data, 0, event->hw.last_period); in intel_pmu_drain_bts_buffer()
980 if (event->attr.exclude_kernel && in intel_pmu_drain_bts_buffer()
981 (kernel_ip(at->from) || kernel_ip(at->to))) in intel_pmu_drain_bts_buffer()
995 header.size * (top - base - skip))) in intel_pmu_drain_bts_buffer()
1000 if (event->attr.exclude_kernel && in intel_pmu_drain_bts_buffer()
1001 (kernel_ip(at->from) || kernel_ip(at->to))) in intel_pmu_drain_bts_buffer()
1004 data.ip = at->from; in intel_pmu_drain_bts_buffer()
1005 data.addr = at->to; in intel_pmu_drain_bts_buffer()
1013 event->hw.interrupts++; in intel_pmu_drain_bts_buffer()
1014 event->pending_kill = POLL_IN; in intel_pmu_drain_bts_buffer()
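For reference, the records walked between bts_buffer_base and bts_index above are fixed 24-byte triples, defined in this file as:

    struct bts_record {
            u64 from;
            u64 to;
            u64 flags;
    };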
1290 struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints); in intel_pebs_constraints()
1293 if (!event->attr.precise_ip) in intel_pebs_constraints()
1298 if (constraint_match(c, event->hw.config)) { in intel_pebs_constraints()
1299 event->hw.flags |= c->flags; in intel_pebs_constraints()
1316 * We need the sched_task callback even for per-cpu events when we use
1322 if (cpuc->n_pebs == cpuc->n_pebs_via_pt) in pebs_needs_sched_cb()
1325 return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs); in pebs_needs_sched_cb()
1338 struct debug_store *ds = cpuc->ds; in pebs_update_threshold()
1339 int max_pebs_events = intel_pmu_max_num_pebs(cpuc->pmu); in pebs_update_threshold()
1341 int reserved; in pebs_update_threshold()
1343 if (cpuc->n_pebs_via_pt) in pebs_update_threshold()
1347 reserved = max_pebs_events + x86_pmu_max_num_counters_fixed(cpuc->pmu); in pebs_update_threshold()
1351 if (cpuc->n_pebs == cpuc->n_large_pebs) { in pebs_update_threshold()
1352 threshold = ds->pebs_absolute_maximum - in pebs_update_threshold()
1353 reserved * cpuc->pebs_record_size; in pebs_update_threshold()
1355 threshold = ds->pebs_buffer_base + cpuc->pebs_record_size; in pebs_update_threshold()
1358 ds->pebs_interrupt_threshold = threshold; in pebs_update_threshold()
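The two branches above encode the large-PEBS vs. single-record trade-off: when every PEBS event on the CPU tolerates large PEBS, the threshold sits near the end of the buffer, minus one record slot per counter that could still write, so the PMI only fires when the buffer is nearly full; otherwise it sits one record past the base and every record raises a PMI. With assumed numbers (reserved = 24 counters, 0xc0-byte records):

    threshold = pebs_absolute_maximum - 24 * 0xc0;  /* large PEBS: near-full */
    threshold = pebs_buffer_base + 0xc0;            /* else: PMI per record  */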
1377 u64 pebs_data_cfg = cpuc->pebs_data_cfg; in adaptive_pebs_record_size_update()
1378 int sz = sizeof(struct pebs_basic); in adaptive_pebs_record_size_update()
1402 cpuc->pebs_record_size = sz; in adaptive_pebs_record_size_update()
1406 int idx, u64 *pebs_data_cfg) in __intel_pmu_pebs_update_cfg()
1416 *pebs_data_cfg |= PEBS_DATACFG_FIX_BIT(idx - INTEL_PMC_IDX_FIXED); in __intel_pmu_pebs_update_cfg()
1426 int i; in intel_pmu_pebs_late_setup()
1428 for (i = 0; i < cpuc->n_events; i++) { in intel_pmu_pebs_late_setup()
1429 event = cpuc->event_list[i]; in intel_pmu_pebs_late_setup()
1432 __intel_pmu_pebs_update_cfg(event, cpuc->assign[i], &pebs_data_cfg); in intel_pmu_pebs_late_setup()
1435 if (pebs_data_cfg & ~cpuc->pebs_data_cfg) in intel_pmu_pebs_late_setup()
1436 cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; in intel_pmu_pebs_late_setup()
1447 struct perf_event_attr *attr = &event->attr; in pebs_update_adaptive_cfg()
1448 u64 sample_type = attr->sample_type; in pebs_update_adaptive_cfg()
1453 attr->precise_ip > 1) in pebs_update_adaptive_cfg()
1466 (attr->sample_regs_intr & PEBS_GP_REGS)) || in pebs_update_adaptive_cfg()
1468 (attr->sample_regs_user & PEBS_GP_REGS)); in pebs_update_adaptive_cfg()
1471 ((attr->config & INTEL_ARCH_EVENT_MASK) == in pebs_update_adaptive_cfg()
1474 if (gprs || (attr->precise_ip < 2) || tsx_weight) in pebs_update_adaptive_cfg()
1478 (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK)) in pebs_update_adaptive_cfg()
1487 ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT); in pebs_update_adaptive_cfg()
1497 struct pmu *pmu = event->pmu; in pebs_update_state()
1501 * During removal, ->pebs_data_cfg is still valid for in pebs_update_state()
1504 if ((cpuc->n_pebs == 1) && add) in pebs_update_state()
1505 cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW; in pebs_update_state()
1513 cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW; in pebs_update_state()
1527 if (pebs_data_cfg & ~cpuc->pebs_data_cfg) in pebs_update_state()
1528 cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW; in pebs_update_state()
1538 if (WARN_ON(event->hw.idx < 0 || event->hw.idx >= X86_PMC_IDX_MAX)) in intel_get_arch_pebs_data_config()
1546 pebs_data_cfg |= cpuc->pebs_data_cfg & cntr_mask; in intel_get_arch_pebs_data_config()
1554 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_add()
1557 cpuc->n_pebs++; in intel_pmu_pebs_add()
1558 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) in intel_pmu_pebs_add()
1559 cpuc->n_large_pebs++; in intel_pmu_pebs_add()
1560 if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT) in intel_pmu_pebs_add()
1561 cpuc->n_pebs_via_pt++; in intel_pmu_pebs_add()
1573 if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK)) in intel_pmu_pebs_via_pt_disable()
1574 cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK; in intel_pmu_pebs_via_pt_disable()
1580 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_via_pt_enable()
1581 struct debug_store *ds = cpuc->ds; in intel_pmu_pebs_via_pt_enable()
1582 u64 value = ds->pebs_event_reset[hwc->idx]; in intel_pmu_pebs_via_pt_enable()
1584 unsigned int idx = hwc->idx; in intel_pmu_pebs_via_pt_enable()
1589 if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) in intel_pmu_pebs_via_pt_enable()
1590 cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD; in intel_pmu_pebs_via_pt_enable()
1592 cpuc->pebs_enabled |= PEBS_OUTPUT_PT; in intel_pmu_pebs_via_pt_enable()
1594 if (hwc->idx >= INTEL_PMC_IDX_FIXED) { in intel_pmu_pebs_via_pt_enable()
1596 idx = hwc->idx - INTEL_PMC_IDX_FIXED; in intel_pmu_pebs_via_pt_enable()
1598 value = ds->pebs_event_reset[MAX_PEBS_EVENTS_FMT4 + idx]; in intel_pmu_pebs_via_pt_enable()
1600 value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx]; in intel_pmu_pebs_via_pt_enable()
1607 if (cpuc->n_pebs == cpuc->n_large_pebs && in intel_pmu_drain_large_pebs()
1608 cpuc->n_pebs != cpuc->n_pebs_via_pt) in intel_pmu_drain_large_pebs()
1615 struct hw_perf_event *hwc = &event->hw; in __intel_pmu_pebs_enable()
1617 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; in __intel_pmu_pebs_enable()
1618 cpuc->pebs_enabled |= 1ULL << hwc->idx; in __intel_pmu_pebs_enable()
1624 u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW; in intel_pmu_pebs_enable()
1625 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_enable()
1626 struct debug_store *ds = cpuc->ds; in intel_pmu_pebs_enable()
1627 unsigned int idx = hwc->idx; in intel_pmu_pebs_enable()
1631 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5)) in intel_pmu_pebs_enable()
1632 cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32); in intel_pmu_pebs_enable()
1633 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) in intel_pmu_pebs_enable()
1634 cpuc->pebs_enabled |= 1ULL << 63; in intel_pmu_pebs_enable()
1637 hwc->config |= ICL_EVENTSEL_ADAPTIVE; in intel_pmu_pebs_enable()
1638 if (pebs_data_cfg != cpuc->active_pebs_data_cfg) { in intel_pmu_pebs_enable()
1647 cpuc->active_pebs_data_cfg = pebs_data_cfg; in intel_pmu_pebs_enable()
1650 if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) { in intel_pmu_pebs_enable()
1651 cpuc->pebs_data_cfg = pebs_data_cfg; in intel_pmu_pebs_enable()
1657 idx = MAX_PEBS_EVENTS_FMT4 + (idx - INTEL_PMC_IDX_FIXED); in intel_pmu_pebs_enable()
1659 idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED); in intel_pmu_pebs_enable()
1663 * Use auto-reload if possible to save an MSR write in the PMI. in intel_pmu_pebs_enable()
1666 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { in intel_pmu_pebs_enable()
1667 ds->pebs_event_reset[idx] = in intel_pmu_pebs_enable()
1668 (u64)(-hwc->sample_period) & x86_pmu.cntval_mask; in intel_pmu_pebs_enable()
1670 ds->pebs_event_reset[idx] = 0; in intel_pmu_pebs_enable()
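The auto-reload value stored above is the sample period negated and truncated to the counter width, so the counter climbs from that value and overflows after exactly sample_period increments. A worked example with hypothetical numbers, a 48-bit counter and a period of 100000:

    /* cntval_mask == (1ULL << 48) - 1 for a 48-bit counter */
    u64 reset = (u64)(-100000LL) & ((1ULL << 48) - 1);
    /* reset == 0xFFFFFFFE7960, and 0xFFFFFFFE7960 + 100000 == 1ULL << 48 */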
1679 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_del()
1682 cpuc->n_pebs--; in intel_pmu_pebs_del()
1683 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) in intel_pmu_pebs_del()
1684 cpuc->n_large_pebs--; in intel_pmu_pebs_del()
1685 if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT) in intel_pmu_pebs_del()
1686 cpuc->n_pebs_via_pt--; in intel_pmu_pebs_del()
1694 struct hw_perf_event *hwc = &event->hw; in __intel_pmu_pebs_disable()
1697 cpuc->pebs_enabled &= ~(1ULL << hwc->idx); in __intel_pmu_pebs_disable()
1698 hwc->config |= ARCH_PERFMON_EVENTSEL_INT; in __intel_pmu_pebs_disable()
1704 struct hw_perf_event *hwc = &event->hw; in intel_pmu_pebs_disable()
1708 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && in intel_pmu_pebs_disable()
1710 cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); in intel_pmu_pebs_disable()
1711 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) in intel_pmu_pebs_disable()
1712 cpuc->pebs_enabled &= ~(1ULL << 63); in intel_pmu_pebs_disable()
1716 if (cpuc->enabled) in intel_pmu_pebs_disable()
1717 wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); in intel_pmu_pebs_disable()
1724 if (cpuc->pebs_enabled) in intel_pmu_pebs_enable_all()
1725 wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); in intel_pmu_pebs_enable_all()
1732 if (cpuc->pebs_enabled) in intel_pmu_pebs_disable_all()
1736 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) in intel_pmu_pebs_fixup_ip()
1739 unsigned long from = cpuc->lbr_entries[0].from; in intel_pmu_pebs_fixup_ip()
1740 unsigned long old_to, to = cpuc->lbr_entries[0].to; in intel_pmu_pebs_fixup_ip()
1741 unsigned long ip = regs->ip; in intel_pmu_pebs_fixup_ip()
1742 int is_64bit = 0; in intel_pmu_pebs_fixup_ip()
1744 int size; in intel_pmu_pebs_fixup_ip()
1755 if (!cpuc->lbr_stack.nr || !from || !to) in intel_pmu_pebs_fixup_ip()
1768 if ((ip - to) > PEBS_FIXUP_SIZE) in intel_pmu_pebs_fixup_ip()
1779 size = ip - to; in intel_pmu_pebs_fixup_ip()
1781 int bytes; in intel_pmu_pebs_fixup_ip()
1814 size -= insn.length; in intel_pmu_pebs_fixup_ip()
1851 return ((struct pebs_record_nhm *)n)->status; in get_pebs_status()
1852 return ((struct pebs_basic *)n)->applicable_counters; in get_pebs_status()
1863 int fl = event->hw.flags; in get_data_src()
1883 /* Converting to a user-defined clock is not supported yet. */ in setup_pebs_time()
1884 if (event->attr.use_clockid != 0) in setup_pebs_time()
1896 data->time = native_sched_clock_from_tsc(tsc) + __sched_clock_offset; in setup_pebs_time()
1897 data->sample_flags |= PERF_SAMPLE_TIME; in setup_pebs_time()
1916 int fll; in setup_pebs_fixed_sample_data()
1921 sample_type = event->attr.sample_type; in setup_pebs_fixed_sample_data()
1922 fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; in setup_pebs_fixed_sample_data()
1924 perf_sample_data_init(data, 0, event->hw.last_period); in setup_pebs_fixed_sample_data()
1927 * Use latency for weight (only avail with PEBS-LL) in setup_pebs_fixed_sample_data()
1930 data->weight.full = pebs->lat; in setup_pebs_fixed_sample_data()
1931 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; in setup_pebs_fixed_sample_data()
1938 data->data_src.val = get_data_src(event, pebs->dse); in setup_pebs_fixed_sample_data()
1939 data->sample_flags |= PERF_SAMPLE_DATA_SRC; in setup_pebs_fixed_sample_data()
1964 regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT; in setup_pebs_fixed_sample_data()
1967 regs->ax = pebs->ax; in setup_pebs_fixed_sample_data()
1968 regs->bx = pebs->bx; in setup_pebs_fixed_sample_data()
1969 regs->cx = pebs->cx; in setup_pebs_fixed_sample_data()
1970 regs->dx = pebs->dx; in setup_pebs_fixed_sample_data()
1971 regs->si = pebs->si; in setup_pebs_fixed_sample_data()
1972 regs->di = pebs->di; in setup_pebs_fixed_sample_data()
1974 regs->bp = pebs->bp; in setup_pebs_fixed_sample_data()
1975 regs->sp = pebs->sp; in setup_pebs_fixed_sample_data()
1978 regs->r8 = pebs->r8; in setup_pebs_fixed_sample_data()
1979 regs->r9 = pebs->r9; in setup_pebs_fixed_sample_data()
1980 regs->r10 = pebs->r10; in setup_pebs_fixed_sample_data()
1981 regs->r11 = pebs->r11; in setup_pebs_fixed_sample_data()
1982 regs->r12 = pebs->r12; in setup_pebs_fixed_sample_data()
1983 regs->r13 = pebs->r13; in setup_pebs_fixed_sample_data()
1984 regs->r14 = pebs->r14; in setup_pebs_fixed_sample_data()
1985 regs->r15 = pebs->r15; in setup_pebs_fixed_sample_data()
1989 if (event->attr.precise_ip > 1) { in setup_pebs_fixed_sample_data()
1992 * (real IP) which fixes the off-by-1 skid in hardware. in setup_pebs_fixed_sample_data()
1996 set_linear_ip(regs, pebs->real_ip); in setup_pebs_fixed_sample_data()
1997 regs->flags |= PERF_EFLAGS_EXACT; in setup_pebs_fixed_sample_data()
1999 /* Otherwise, use PEBS off-by-1 IP: */ in setup_pebs_fixed_sample_data()
2000 set_linear_ip(regs, pebs->ip); in setup_pebs_fixed_sample_data()
2003 * With precise_ip >= 2, try to fix up the off-by-1 IP in setup_pebs_fixed_sample_data()
2005 * corrects regs->ip and calls set_linear_ip() on regs: in setup_pebs_fixed_sample_data()
2008 regs->flags |= PERF_EFLAGS_EXACT; in setup_pebs_fixed_sample_data()
2012 * When precise_ip == 1, return the PEBS off-by-1 IP, in setup_pebs_fixed_sample_data()
2015 set_linear_ip(regs, pebs->ip); in setup_pebs_fixed_sample_data()
2021 data->addr = pebs->dla; in setup_pebs_fixed_sample_data()
2022 data->sample_flags |= PERF_SAMPLE_ADDR; in setup_pebs_fixed_sample_data()
2028 data->weight.full = intel_get_tsx_weight(pebs->tsx_tuning); in setup_pebs_fixed_sample_data()
2029 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; in setup_pebs_fixed_sample_data()
2032 data->txn = intel_get_tsx_transaction(pebs->tsx_tuning, in setup_pebs_fixed_sample_data()
2033 pebs->ax); in setup_pebs_fixed_sample_data()
2034 data->sample_flags |= PERF_SAMPLE_TRANSACTION; in setup_pebs_fixed_sample_data()
2045 setup_pebs_time(event, data, pebs->tsc); in setup_pebs_fixed_sample_data()
2047 perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); in setup_pebs_fixed_sample_data()
2053 regs->ax = gprs->ax; in adaptive_pebs_save_regs()
2054 regs->bx = gprs->bx; in adaptive_pebs_save_regs()
2055 regs->cx = gprs->cx; in adaptive_pebs_save_regs()
2056 regs->dx = gprs->dx; in adaptive_pebs_save_regs()
2057 regs->si = gprs->si; in adaptive_pebs_save_regs()
2058 regs->di = gprs->di; in adaptive_pebs_save_regs()
2059 regs->bp = gprs->bp; in adaptive_pebs_save_regs()
2060 regs->sp = gprs->sp; in adaptive_pebs_save_regs()
2062 regs->r8 = gprs->r8; in adaptive_pebs_save_regs()
2063 regs->r9 = gprs->r9; in adaptive_pebs_save_regs()
2064 regs->r10 = gprs->r10; in adaptive_pebs_save_regs()
2065 regs->r11 = gprs->r11; in adaptive_pebs_save_regs()
2066 regs->r12 = gprs->r12; in adaptive_pebs_save_regs()
2067 regs->r13 = gprs->r13; in adaptive_pebs_save_regs()
2068 regs->r14 = gprs->r14; in adaptive_pebs_save_regs()
2069 regs->r15 = gprs->r15; in adaptive_pebs_save_regs()
2075 int shift = 64 - x86_pmu.cntval_bits; in intel_perf_event_update_pmc()
2082 * - An event is deleted. There is still an active PEBS event. in intel_perf_event_update_pmc()
2087 * - An event is stopped for some reason, e.g., throttled. in intel_perf_event_update_pmc()
2094 * But cpuc->events[uninitialized_counter] is always NULL, in intel_perf_event_update_pmc()
2101 hwc = &event->hw; in intel_perf_event_update_pmc()
2102 prev_pmc = local64_read(&hwc->prev_count); in intel_perf_event_update_pmc()
2106 local64_set(&hwc->prev_count, pmc); in intel_perf_event_update_pmc()
2108 delta = (pmc << shift) - (prev_pmc << shift); in intel_perf_event_update_pmc()
2111 local64_add(delta, &event->count); in intel_perf_event_update_pmc()
2112 local64_sub(delta, &hwc->period_left); in intel_perf_event_update_pmc()
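The shift pair above moves both raw counter values into the top bits of a 64-bit word before subtracting, which keeps the delta correct across a counter wrap (the file shifts the result back down right after the matched line). A worked example assuming a 48-bit counter, i.e. shift == 16:

    /* counter wrapped from 0xFFFFFFFFFFF0 to 0x10 via one overflow */
    s64 delta = (0x10ULL << 16) - (0xFFFFFFFFFFF0ULL << 16);
    delta >>= 16;   /* == 0x20: 0x10 counts before the wrap + 0x10 after */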
2120 int bit; in __setup_pebs_counter_group()
2122 for_each_set_bit(bit, (unsigned long *)&cntr->cntr, INTEL_PMC_MAX_GENERIC) { in __setup_pebs_counter_group()
2123 intel_perf_event_update_pmc(cpuc->events[bit], *(u64 *)next_record); in __setup_pebs_counter_group()
2127 for_each_set_bit(bit, (unsigned long *)&cntr->fixed, INTEL_PMC_MAX_FIXED) { in __setup_pebs_counter_group()
2129 if ((cntr->metrics == INTEL_CNTR_METRICS) && in __setup_pebs_counter_group()
2134 intel_perf_event_update_pmc(cpuc->events[bit + INTEL_PMC_IDX_FIXED], in __setup_pebs_counter_group()
2140 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) in __setup_pebs_counter_group()
2141 local64_set(&event->hw.prev_count, (u64)-event->hw.sample_period); in __setup_pebs_counter_group()
2143 if (cntr->metrics == INTEL_CNTR_METRICS) { in __setup_pebs_counter_group()
2145 (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS], in __setup_pebs_counter_group()
2157 perf_sample_data_init(data, 0, event->hw.last_period); in __setup_perf_sample_data()
2176 regs->flags = PERF_EFLAGS_EXACT; in __setup_pebs_basic_group()
2180 data->weight.var3_w = retire; in __setup_pebs_basic_group()
2188 if (event->attr.precise_ip < 2) { in __setup_pebs_gpr_group()
2189 set_linear_ip(regs, gprs->ip); in __setup_pebs_gpr_group()
2190 regs->flags &= ~PERF_EFLAGS_EXACT; in __setup_pebs_gpr_group()
2206 data->weight.var2_w = instr_latency; in __setup_pebs_meminfo_group()
2214 data->weight.full = latency ?: tsx_latency; in __setup_pebs_meminfo_group()
2216 data->weight.var1_dw = (u32)latency ?: tsx_latency; in __setup_pebs_meminfo_group()
2218 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; in __setup_pebs_meminfo_group()
2222 data->data_src.val = get_data_src(event, aux); in __setup_pebs_meminfo_group()
2223 data->sample_flags |= PERF_SAMPLE_DATA_SRC; in __setup_pebs_meminfo_group()
2227 data->addr = address; in __setup_pebs_meminfo_group()
2228 data->sample_flags |= PERF_SAMPLE_ADDR; in __setup_pebs_meminfo_group()
2232 data->txn = intel_get_tsx_transaction(tsx_tuning, ax); in __setup_pebs_meminfo_group()
2233 data->sample_flags |= PERF_SAMPLE_TRANSACTION; in __setup_pebs_meminfo_group()
2246 u64 sample_type = event->attr.sample_type; in setup_pebs_adaptive_sample_data()
2259 perf_regs->xmm_regs = NULL; in setup_pebs_adaptive_sample_data()
2261 format_group = basic->format_group; in setup_pebs_adaptive_sample_data()
2269 basic->retire_latency : 0; in setup_pebs_adaptive_sample_data()
2271 basic->ip, basic->tsc, retire); in setup_pebs_adaptive_sample_data()
2275 * But PERF_SAMPLE_TRANSACTION needs gprs->ax. in setup_pebs_adaptive_sample_data()
2292 meminfo->cache_latency : meminfo->mem_latency; in setup_pebs_adaptive_sample_data()
2294 meminfo->instr_latency : 0; in setup_pebs_adaptive_sample_data()
2295 u64 ax = gprs ? gprs->ax : 0; in setup_pebs_adaptive_sample_data()
2298 instr_latency, meminfo->address, in setup_pebs_adaptive_sample_data()
2299 meminfo->aux, meminfo->tsx_tuning, in setup_pebs_adaptive_sample_data()
2307 perf_regs->xmm_regs = xmm->xmm; in setup_pebs_adaptive_sample_data()
2312 int num_lbr = ((format_group >> PEBS_DATACFG_LBR_SHIFT) in setup_pebs_adaptive_sample_data()
2324 unsigned int nr; in setup_pebs_adaptive_sample_data()
2330 * For the PEBS record of a non-sample-read group, ignore in setup_pebs_adaptive_sample_data()
2335 data->sample_flags |= PERF_SAMPLE_READ; in setup_pebs_adaptive_sample_data()
2338 nr = hweight32(cntr->cntr) + hweight32(cntr->fixed); in setup_pebs_adaptive_sample_data()
2339 if (cntr->metrics == INTEL_CNTR_METRICS) in setup_pebs_adaptive_sample_data()
2344 WARN_ONCE(next_record != __pebs + basic->format_size, in setup_pebs_adaptive_sample_data()
2346 basic->format_size, in setup_pebs_adaptive_sample_data()
2347 (u64)(next_record - __pebs), in setup_pebs_adaptive_sample_data()
2354 return header->cont || !(header->format & GENMASK_ULL(63, 16)); in arch_pebs_record_continued()
2364 u64 sample_type = event->attr.sample_type; in setup_arch_pebs_sample_data()
2376 perf_regs->xmm_regs = NULL; in setup_arch_pebs_sample_data()
2385 if (header->basic) { in setup_arch_pebs_sample_data()
2392 retire = basic->valid ? basic->retire : 0; in setup_arch_pebs_sample_data()
2394 basic->ip, basic->tsc, retire); in setup_arch_pebs_sample_data()
2399 * But PERF_SAMPLE_TRANSACTION needs gprs->ax. in setup_arch_pebs_sample_data()
2402 if (header->aux) { in setup_arch_pebs_sample_data()
2407 if (header->gpr) { in setup_arch_pebs_sample_data()
2416 if (header->aux) { in setup_arch_pebs_sample_data()
2417 u64 ax = gprs ? gprs->ax : 0; in setup_arch_pebs_sample_data()
2420 meminfo->cache_latency, in setup_arch_pebs_sample_data()
2421 meminfo->instr_latency, in setup_arch_pebs_sample_data()
2422 meminfo->address, meminfo->aux, in setup_arch_pebs_sample_data()
2423 meminfo->tsx_tuning, ax); in setup_arch_pebs_sample_data()
2426 if (header->xmm) { in setup_arch_pebs_sample_data()
2432 perf_regs->xmm_regs = xmm->xmm; in setup_arch_pebs_sample_data()
2436 if (header->lbr) { in setup_arch_pebs_sample_data()
2439 int num_lbr; in setup_arch_pebs_sample_data()
2444 num_lbr = header->lbr == ARCH_PEBS_LBR_NUM_VAR ? in setup_arch_pebs_sample_data()
2445 lbr_header->depth : in setup_arch_pebs_sample_data()
2446 header->lbr * ARCH_PEBS_BASE_LBR_ENTRIES; in setup_arch_pebs_sample_data()
2455 if (header->cntr) { in setup_arch_pebs_sample_data()
2457 unsigned int nr; in setup_arch_pebs_sample_data()
2464 data->sample_flags |= PERF_SAMPLE_READ; in setup_arch_pebs_sample_data()
2467 nr = hweight32(cntr->cntr) + hweight32(cntr->fixed); in setup_arch_pebs_sample_data()
2468 if (cntr->metrics == INTEL_CNTR_METRICS) in setup_arch_pebs_sample_data()
2475 at = at + header->size; in setup_arch_pebs_sample_data()
2481 get_next_pebs_record_by_bit(void *base, void *top, int bit) in get_next_pebs_record_by_bit()
2497 for (at = base; at < top; at += cpuc->pebs_record_size) { in get_next_pebs_record_by_bit()
2508 /* clear non-PEBS bit and re-check */ in get_next_pebs_record_by_bit()
2509 pebs_status = status & cpuc->pebs_enabled; in get_next_pebs_record_by_bit()
2519 * Special variant of intel_pmu_save_and_restart() for auto-reload.
2521 static int
2522 intel_pmu_save_and_restart_reload(struct perf_event *event, int count) in intel_pmu_save_and_restart_reload()
2524 struct hw_perf_event *hwc = &event->hw; in intel_pmu_save_and_restart_reload()
2525 int shift = 64 - x86_pmu.cntval_bits; in intel_pmu_save_and_restart_reload()
2526 u64 period = hwc->sample_period; in intel_pmu_save_and_restart_reload()
2537 prev_raw_count = local64_read(&hwc->prev_count); in intel_pmu_save_and_restart_reload()
2538 new_raw_count = rdpmc(hwc->event_base_rdpmc); in intel_pmu_save_and_restart_reload()
2539 local64_set(&hwc->prev_count, new_raw_count); in intel_pmu_save_and_restart_reload()
2545 * [-period, 0] in intel_pmu_save_and_restart_reload()
2549 * A) value2 - value1; in intel_pmu_save_and_restart_reload()
2552 * B) (0 - value1) + (value2 - (-period)); in intel_pmu_save_and_restart_reload()
2555 * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period)); in intel_pmu_save_and_restart_reload()
2566 * value2 - value1 + n * period in intel_pmu_save_and_restart_reload()
2570 local64_add(new - old + count * period, &event->count); in intel_pmu_save_and_restart_reload()
2572 local64_set(&hwc->period_left, -new); in intel_pmu_save_and_restart_reload()
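Plugging assumed numbers into case C above: period = 0x10000, count = 2 drained records, value1 (old) = -0x3000 and value2 (new) = -0xB000 after sign extension. The piecewise form and the collapsed new - old + count * period agree:

    /* C) (0 - value1) + (n - 1) * period + (value2 - (-period))
     *  =     0x3000   +      0x10000    +        0x5000         = 0x18000
     * collapsed: new - old + count * period
     *          = -0xB000 + 0x3000 + 2 * 0x10000                 = 0x18000
     */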
2602 int count, in __intel_pmu_pebs_last_event()
2605 struct hw_perf_event *hwc = &event->hw; in __intel_pmu_pebs_last_event()
2610 * The PEBS records may be drained in the non-overflow context, in __intel_pmu_pebs_last_event()
2624 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { in __intel_pmu_pebs_last_event()
2633 * Now, auto-reload is only enabled in fixed period mode. in __intel_pmu_pebs_last_event()
2634 * The reload value is always hwc->sample_period. in __intel_pmu_pebs_last_event()
2635 * May need to change it, if auto-reload is enabled in in __intel_pmu_pebs_last_event()
2642 * For a non-precise event, it's possible the in __intel_pmu_pebs_last_event()
2643 * counters-snapshotting records a positive value for the in __intel_pmu_pebs_last_event()
2644 * overflowed event. Then the HW auto-reload mechanism in __intel_pmu_pebs_last_event()
2651 * counters-snapshotting record, only needs to set the new in __intel_pmu_pebs_last_event()
2666 int bit, int count, in __intel_pmu_pebs_events()
2673 int cnt = count; in __intel_pmu_pebs_events()
2680 at += cpuc->pebs_record_size; in __intel_pmu_pebs_events()
2682 cnt--; in __intel_pmu_pebs_events()
2691 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_pebs_core()
2692 struct perf_event *event = cpuc->events[0]; /* PMC0 only */ in intel_pmu_drain_pebs_core()
2694 int n; in intel_pmu_drain_pebs_core()
2699 at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; in intel_pmu_drain_pebs_core()
2700 top = (struct pebs_record_core *)(unsigned long)ds->pebs_index; in intel_pmu_drain_pebs_core()
2705 ds->pebs_index = ds->pebs_buffer_base; in intel_pmu_drain_pebs_core()
2707 if (!test_bit(0, cpuc->active_mask)) in intel_pmu_drain_pebs_core()
2712 if (!event->attr.precise_ip) in intel_pmu_drain_pebs_core()
2715 n = top - at; in intel_pmu_drain_pebs_core()
2717 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) in intel_pmu_drain_pebs_core()
2726 static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask) in intel_pmu_pebs_event_update_no_drain() argument
2728 u64 pebs_enabled = cpuc->pebs_enabled & mask; in intel_pmu_pebs_event_update_no_drain()
2730 int bit; in intel_pmu_pebs_event_update_no_drain()
2734 * for auto-reload event in pmu::read(). There are no in intel_pmu_pebs_event_update_no_drain()
2737 * update the event->count for this case. in intel_pmu_pebs_event_update_no_drain()
2740 event = cpuc->events[bit]; in intel_pmu_pebs_event_update_no_drain()
2741 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) in intel_pmu_pebs_event_update_no_drain()
2749 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_pebs_nhm()
2754 int max_pebs_events = intel_pmu_max_num_pebs(NULL); in intel_pmu_drain_pebs_nhm()
2755 int bit, i, size; in intel_pmu_drain_pebs_nhm()
2756 u64 mask; in intel_pmu_drain_pebs_nhm() local
2761 base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; in intel_pmu_drain_pebs_nhm()
2762 top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; in intel_pmu_drain_pebs_nhm()
2764 ds->pebs_index = ds->pebs_buffer_base; in intel_pmu_drain_pebs_nhm()
2766 mask = x86_pmu.pebs_events_mask; in intel_pmu_drain_pebs_nhm()
2769 mask |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED; in intel_pmu_drain_pebs_nhm()
2774 intel_pmu_pebs_event_update_no_drain(cpuc, mask); in intel_pmu_drain_pebs_nhm()
2782 pebs_status = p->status & cpuc->pebs_enabled; in intel_pmu_drain_pebs_nhm()
2783 pebs_status &= mask; in intel_pmu_drain_pebs_nhm()
2801 if (!pebs_status && cpuc->pebs_enabled && in intel_pmu_drain_pebs_nhm()
2802 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) in intel_pmu_drain_pebs_nhm()
2803 pebs_status = p->status = cpuc->pebs_enabled; in intel_pmu_drain_pebs_nhm()
2816 * If these events include one PEBS and multiple non-PEBS in intel_pmu_drain_pebs_nhm()
2835 for_each_set_bit(bit, (unsigned long *)&mask, size) { in intel_pmu_drain_pebs_nhm()
2839 event = cpuc->events[bit]; in intel_pmu_drain_pebs_nhm()
2843 if (WARN_ON_ONCE(!event->attr.precise_ip)) in intel_pmu_drain_pebs_nhm()
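The pebs_enabled & (pebs_enabled - 1) expression a few lines up is the usual single-bit test: x & (x - 1) clears the lowest set bit, so the result is zero exactly when at most one PEBS counter is enabled, and a record whose status field reads zero can then only belong to that counter. For instance:

    u64 pebs_enabled = 0x8;     /* only counter 3 is doing PEBS */
    /* 0x8 & 0x7 == 0 -> safe to repair p->status = cpuc->pebs_enabled */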
2872 int bit; in __intel_pmu_handle_pebs_record()
2875 event = cpuc->events[bit]; in __intel_pmu_handle_pebs_record()
2878 WARN_ON_ONCE(!event->attr.precise_ip)) in __intel_pmu_handle_pebs_record()
2894 u64 mask, short *counts, void **last, in __intel_pmu_handle_last_pebs_record() argument
2899 int bit; in __intel_pmu_handle_last_pebs_record()
2901 for_each_set_bit(bit, (unsigned long *)&mask, X86_PMC_IDX_MAX) { in __intel_pmu_handle_last_pebs_record()
2905 event = cpuc->events[bit]; in __intel_pmu_handle_last_pebs_record()
2918 struct debug_store *ds = cpuc->ds; in intel_pmu_drain_pebs_icl()
2923 u64 mask; in intel_pmu_drain_pebs_icl() local
2928 base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base; in intel_pmu_drain_pebs_icl()
2929 top = (struct pebs_basic *)(unsigned long)ds->pebs_index; in intel_pmu_drain_pebs_icl()
2931 ds->pebs_index = ds->pebs_buffer_base; in intel_pmu_drain_pebs_icl()
2933 mask = hybrid(cpuc->pmu, pebs_events_mask) | in intel_pmu_drain_pebs_icl()
2934 (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED); in intel_pmu_drain_pebs_icl()
2935 mask &= cpuc->pebs_enabled; in intel_pmu_drain_pebs_icl()
2938 intel_pmu_pebs_event_update_no_drain(cpuc, mask); in intel_pmu_drain_pebs_icl()
2946 for (at = base; at < top; at += basic->format_size) { in intel_pmu_drain_pebs_icl()
2950 if (basic->format_size != cpuc->pebs_record_size) in intel_pmu_drain_pebs_icl()
2953 pebs_status = mask & basic->applicable_counters; in intel_pmu_drain_pebs_icl()
2959 __intel_pmu_handle_last_pebs_record(iregs, regs, data, mask, counts, last, in intel_pmu_drain_pebs_icl()
2973 u64 mask; in intel_pmu_drain_arch_pebs() local
2982 base = cpuc->pebs_vaddr; in intel_pmu_drain_arch_pebs()
2983 top = cpuc->pebs_vaddr + (index.wr << ARCH_PEBS_INDEX_WR_SHIFT); in intel_pmu_drain_arch_pebs()
2988 if (cpuc->n_pebs == cpuc->n_large_pebs) in intel_pmu_drain_arch_pebs()
2994 mask = hybrid(cpuc->pmu, arch_pebs_cap).counters & cpuc->pebs_enabled; in intel_pmu_drain_arch_pebs()
3007 if (WARN_ON_ONCE(!header->size)) in intel_pmu_drain_arch_pebs()
3011 if (!header->basic) { in intel_pmu_drain_arch_pebs()
3012 at += header->size; in intel_pmu_drain_arch_pebs()
3017 pebs_status = mask & basic->applicable_counters; in intel_pmu_drain_arch_pebs()
3022 /* Skip non-last fragments */ in intel_pmu_drain_arch_pebs()
3024 if (!header->size) in intel_pmu_drain_arch_pebs()
3026 at += header->size; in intel_pmu_drain_arch_pebs()
3031 at += header->size; in intel_pmu_drain_arch_pebs()
3034 __intel_pmu_handle_last_pebs_record(iregs, regs, data, mask, in intel_pmu_drain_arch_pebs()
3042 * Current hybrid platforms either support arch-PEBS on all cores or in intel_arch_pebs_init()
3044 * if the boot CPU supports arch-PEBS. in intel_arch_pebs_init()
3074 char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; in intel_ds_pebs_init()
3076 int format = x86_pmu.intel_cap.pebs_format; in intel_ds_pebs_init()
3137 pebs_qual = "-baseline"; in intel_ds_pebs_init()
3138 x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; in intel_ds_pebs_init()
3152 * PEBS-via-PT is not supported on hybrid platforms, in intel_ds_pebs_init()
3156 * of the feature. The per-PMU pebs_output_pt_available in intel_ds_pebs_init()
3160 pr_cont("PEBS-via-PT, "); in intel_ds_pebs_init()
3161 x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT; in intel_ds_pebs_init()