/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/fpu/xstate.h>
#include <asm/intel_ds.h>
#include <asm/cpu.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE		= -1, /* not used */

	EXTRA_REG_RSP_0		= 0,  /* offcore_response_0 */
	EXTRA_REG_RSP_1		= 1,  /* offcore_response_1 */
	EXTRA_REG_LBR		= 2,  /* lbr_select */
	EXTRA_REG_LDLAT		= 3,  /* ld_lat_threshold */
	EXTRA_REG_FE		= 4,  /* fe_* */
	EXTRA_REG_SNOOP_0	= 5,  /* snoop response 0 */
	EXTRA_REG_SNOOP_1	= 6,  /* snoop response 1 */

	EXTRA_REG_MAX		      /* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64		code;
	u64		cmask;
	int		weight;
	int		overlap;
	int		flags;
	unsigned int	size;
};

static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}

#define PERF_ARCH(name, val)	\
	PERF_X86_EVENT_##name = val,

/*
 * struct hw_perf_event.flags flags
 */
enum {
#include "perf_event_flags.h"
};

#undef PERF_ARCH

#define PERF_ARCH(name, val)						\
	static_assert((PERF_X86_EVENT_##name & PERF_EVENT_FLAG_ARCH) ==	\
		      PERF_X86_EVENT_##name);

#include "perf_event_flags.h"

#undef PERF_ARCH

static inline bool is_topdown_count(struct perf_event *event)
{
	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
}

static inline bool is_metric_event(struct perf_event *event)
{
	u64 config = event->attr.config;

	return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
		((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING)  &&
		((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
}

static inline bool is_slots_event(struct perf_event *event)
{
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
}

static inline bool is_topdown_event(struct perf_event *event)
{
	return is_metric_event(event) || is_slots_event(event);
}
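/*
 * Illustrative note (values per the INTEL_TD_* encodings in
 * <asm/perf_event.h>): the fixed TOPDOWN.SLOTS event is config 0x0400,
 * while the metric pseudo events reuse the otherwise invalid event
 * code 0x00 with umasks from INTEL_TD_METRIC_RETIRING (0x8000) up to
 * INTEL_TD_METRIC_MAX, e.g.:
 *
 *	attr.config = INTEL_TD_SLOTS;		  - is_slots_event()  == true
 *	attr.config = INTEL_TD_METRIC_RETIRING;   - is_metric_event() == true
 */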
static inline bool is_branch_counters_group(struct perf_event *event)
{
	return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
}

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
#define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
#define PEBS_OUTPUT_OFFSET	61
#define PEBS_OUTPUT_MASK	(3ull << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT		(1ull << PEBS_OUTPUT_OFFSET)
#define PEBS_VIA_PT_MASK	(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 *
 */
#define LARGE_PEBS_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
	PERF_SAMPLE_PERIOD | PERF_SAMPLE_CODE_PAGE_SIZE | \
	PERF_SAMPLE_WEIGHT_TYPE)

#define PEBS_GP_REGS			\
	((1ULL << PERF_REG_X86_AX)    | \
	 (1ULL << PERF_REG_X86_BX)    | \
	 (1ULL << PERF_REG_X86_CX)    | \
	 (1ULL << PERF_REG_X86_DX)    | \
	 (1ULL << PERF_REG_X86_DI)    | \
	 (1ULL << PERF_REG_X86_SI)    | \
	 (1ULL << PERF_REG_X86_SP)    | \
	 (1ULL << PERF_REG_X86_BP)    | \
	 (1ULL << PERF_REG_X86_IP)    | \
	 (1ULL << PERF_REG_X86_FLAGS) | \
	 (1ULL << PERF_REG_X86_R8)    | \
	 (1ULL << PERF_REG_X86_R9)    | \
	 (1ULL << PERF_REG_X86_R10)   | \
	 (1ULL << PERF_REG_X86_R11)   | \
	 (1ULL << PERF_REG_X86_R12)   | \
	 (1ULL << PERF_REG_X86_R13)   | \
	 (1ULL << PERF_REG_X86_R14)   | \
	 (1ULL << PERF_REG_X86_R15))
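/*
 * Illustrative note (a sketch of the policy, not a definition in this
 * header): an event whose sample_type is fully covered by
 * LARGE_PEBS_FLAGS, e.g.
 *
 *	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_PERIOD;
 *
 * qualifies for multi-record ("large") PEBS and takes one PMI per
 * filled buffer rather than one PMI per sample.
 */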
/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t		lock;	/* per-core: protect structure */
	u64			config;	/* extra MSR config */
	u64			reg;	/* extra MSR number */
	atomic_t		ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES		32

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_INFO2	= 0x07,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_INFO2,
};

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			n_txn_pair;
	int			n_txn_metric;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	void			*ds_pebs_vaddr;
	void			*ds_bts_vaddr;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;
	int			n_pebs_via_pt;
	int			pebs_output;

	/* Current super set of events hardware configuration */
	u64			pebs_data_cfg;
	u64			active_pebs_data_cfg;
	int			pebs_record_size;

	/* Intel Fixed counter configuration */
	u64			fixed_ctrl_val;
	u64			active_fixed_ctrl_val;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	int				lbr_pebs_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	u64				lbr_counters[MAX_LBR_ENTRIES]; /* branch stack extra */
	union {
		struct er_account	*lbr_sel;
		struct er_account	*lbr_ctl;
	};
	u64				br_sel;
	void				*last_task_ctx;
	int				last_log_id;
	int				lbr_select;
	void				*lbr_xsave;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;
	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs	*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * SKL TSX_FORCE_ABORT shadow
	 */
	u64				tfa_shadow;

	/*
	 * Perf Metrics
	 */
	/* number of accepted metrics events */
	int				n_metric;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	int				brs_active; /* BRS is enabled */

	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;
	int				n_pair; /* Large increment events */

	void				*kfree_on_online[X86_PERF_KFREE_MAX];

	struct pmu			*pmu;
};

#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.size = (e) - (c),		\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m) \
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
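/*
 * Worked example (illustrative): EVENT_CONSTRAINT_RANGE(0xc0, 0xc4, n,
 * ARCH_PERFMON_EVENTSEL_EVENT) stores .code = 0xc0 and .size = 0x4.
 * For event code 0xc2, constraint_match() computes (0xc2 - 0xc0) = 0x2
 * <= 0x4, a match; for 0xc6 it computes 0x6 > 0x4, no match. Codes
 * below 0xc0 wrap around to huge unsigned values and fail the
 * comparison as well.
 */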
#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and its counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
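/*
 * Illustrative use (mirrors the Intel core PMU tables, with fixed
 * counter indices starting at bit 32, i.e. INTEL_PMC_IDX_FIXED):
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 *
 * constrains INST_RETIRED.ANY to fixed counter 0, i.e. idxmsk64 bit
 * 32, the first index past the general purpose counters.
 */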
/*
 * The special metric counters do not actually exist. They are calculated from
 * the combination of the FxCtr3 + MSR_PERF_METRICS.
 *
 * The special metric counters are mapped to a dummy offset for the scheduler.
 * The sharing between multiple users of the same metric without multiplexing
 * is not allowed, even though the hardware supports that in principle.
 */

#define METRIC_EVENT_CONSTRAINT(c, n)					\
	EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)),	\
			 INTEL_ARCH_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PSD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_STLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

#define INTEL_HYBRID_LAT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID)
/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
	__EVENT_CONSTRAINT_RANGE(code, end, n,		\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
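/*
 * Typical iteration pattern (sketch): walk a constraint table that is
 * terminated with EVENT_CONSTRAINT_END, the way the model specific
 * get_event_constraints() implementations do:
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if (constraint_match(c, event->hw.config))
 *			return c;
 *	}
 */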
/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
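/*
 * Illustrative entry, modeled on the Nehalem OFFCORE_RESPONSE table:
 * tie event 0x01b7 (OFFCORE_RESPONSE_0, matched on event code + umask)
 * to the MSR_OFFCORE_RSP_0 extra register; later parts use wider
 * valid masks:
 *
 *	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *	EVENT_EXTRA_END
 */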
union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
		u64	pebs_baseline:1;
		u64	perf_metrics:1;
		u64	pebs_output_pt_available:1;
		u64	pebs_timing_info:1;
		u64	anythread_deprecated:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
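/*
 * Usage example (the encoding style used by the Intel event tables):
 * build a raw config with designated initializers instead of open
 * coded shifts:
 *
 *	X86_CONFIG(.event=0xc0, .umask=0x01, .inv=0, .cmask=0)
 *
 * which yields 0x01c0, i.e. umask 0x01 in bits 15:8 and event code
 * 0xc0 in bits 7:0.
 */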
enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

#define PERF_PEBS_DATA_SOURCE_MAX	0x10
#define PERF_PEBS_DATA_SOURCE_MASK	(PERF_PEBS_DATA_SOURCE_MAX - 1)

enum hybrid_cpu_type {
	HYBRID_INTEL_NONE,
	HYBRID_INTEL_ATOM	= 0x20,
	HYBRID_INTEL_CORE	= 0x40,
};

enum hybrid_pmu_type {
	not_hybrid,
	hybrid_small		= BIT(0),
	hybrid_big		= BIT(1),

	hybrid_big_small	= hybrid_big | hybrid_small, /* only used for matching */
};

#define X86_HYBRID_PMU_ATOM_IDX		0
#define X86_HYBRID_PMU_CORE_IDX		1

#define X86_HYBRID_NUM_PMUS		2

struct x86_hybrid_pmu {
	struct pmu			pmu;
	const char			*name;
	enum hybrid_pmu_type		pmu_type;
	cpumask_t			supported_cpus;
	union perf_capabilities		intel_cap;
	u64				intel_ctrl;
	int				max_pebs_events;
	int				num_counters;
	int				num_counters_fixed;
	struct event_constraint		unconstrained;

	u64				hw_cache_event_ids
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX];
	u64				hw_cache_extra_regs
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX];
	struct event_constraint		*event_constraints;
	struct event_constraint		*pebs_constraints;
	struct extra_reg		*extra_regs;

	unsigned int			late_ack	:1,
					mid_ack		:1,
					enabled_ack	:1;

	u64				pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX];
};

static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
{
	return container_of(pmu, struct x86_hybrid_pmu, pmu);
}

extern struct static_key_false perf_is_hybrid;
#define is_hybrid()		static_branch_unlikely(&perf_is_hybrid)

#define hybrid(_pmu, _field)				\
(*({							\
	typeof(&x86_pmu._field) __Fp = &x86_pmu._field;	\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = &hybrid_pmu(_pmu)->_field;	\
							\
	__Fp;						\
}))

#define hybrid_var(_pmu, _var)				\
(*({							\
	typeof(&_var) __Fp = &_var;			\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = &hybrid_pmu(_pmu)->_var;		\
							\
	__Fp;						\
}))

#define hybrid_bit(_pmu, _field)			\
({							\
	bool __Fp = x86_pmu._field;			\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = hybrid_pmu(_pmu)->_field;	\
							\
	__Fp;						\
})
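/*
 * Accessor example (sketch): hybrid() transparently picks the per-PMU
 * copy of a field on hybrid parts and falls back to the global x86_pmu
 * otherwise; since it dereferences a pointer it is usable as an lvalue
 * too:
 *
 *	u64 ctrl = hybrid(cpuc->pmu, intel_ctrl);
 *	int n    = hybrid(cpuc->pmu, max_pebs_events);
 */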
/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*assign)(struct perf_event *event, int idx);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	void		(*read)(struct perf_event *event);
	int		(*set_period)(struct perf_event *event);
	u64		(*update)(struct perf_event *event);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	void		(*limit_period)(struct perf_event *event, s64 *l);

	/* PMI handler bits */
	unsigned int	late_ack		:1,
			mid_ack			:1,
			enabled_ack		:1;
	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	const struct attribute_group **attr_update;

	unsigned long	attr_freeze_on_smi;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_pmu_context *pmu_ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts			:1,
			bts_active		:1,
			pebs			:1,
			pebs_active		:1,
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1,
			pebs_no_isolation	:1,
			pebs_block		:1,
			pebs_ept		:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	int		max_pebs_events;
	void		(*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	u64		(*pebs_latency_data)(struct perf_event *event, u64 status);
	unsigned long	large_pebs_flags;
	u64		rtm_abort_event;
	u64		pebs_capable;

	/*
	 * Intel LBR
	 */
	unsigned int	lbr_tos, lbr_from, lbr_to,
			lbr_info, lbr_nr;	   /* LBR base regs and size */
	union {
		u64	lbr_sel_mask;		   /* LBR_SELECT valid bits */
		u64	lbr_ctl_mask;		   /* LBR_CTL valid bits */
	};
	union {
		const int	*lbr_sel_map;	   /* lbr_select mappings */
		int		*lbr_ctl_map;	   /* LBR_CTL mappings */
	};
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	unsigned int	lbr_has_info:1;
	unsigned int	lbr_has_tsx:1;
	unsigned int	lbr_from_flags:1;
	unsigned int	lbr_to_cycles:1;

	/*
	 * Intel Architectural LBR CPUID Enumeration
	 */
	unsigned int	lbr_depth_mask:8;
	unsigned int	lbr_deep_c_reset:1;
	unsigned int	lbr_lip:1;
	unsigned int	lbr_cpl:1;
	unsigned int	lbr_filter:1;
	unsigned int	lbr_call_stack:1;
	unsigned int	lbr_mispred:1;
	unsigned int	lbr_timed_lbr:1;
	unsigned int	lbr_br_type:1;
	unsigned int	lbr_counters:4;

	void		(*lbr_reset)(void);
	void		(*lbr_read)(struct cpu_hw_events *cpuc);
	void		(*lbr_save)(void *ctx);
	void		(*lbr_restore)(void *ctx);

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * Intel perf metrics
	 */
	int		num_topdown_events;

	/*
	 * perf task context (i.e. struct perf_event_pmu_context::task_ctx_data)
	 * switch helper to bridge calls from perf/core to perf/x86.
	 * See struct pmu::swap_task_ctx() usage for examples;
	 */
	void		(*swap_task_ctx)(struct perf_event_pmu_context *prev_epc,
					 struct perf_event_pmu_context *next_epc);

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;
	u64		perf_ctr_pair_en;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr, void *data);

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period) (struct perf_event *event, u64 period);

	int (*aux_output_match) (struct perf_event *event);

	void (*filter)(struct pmu *pmu, int cpu, bool *ret);
	/*
	 * Hybrid support
	 *
	 * Most PMU capabilities are the same among different hybrid PMUs.
	 * The global x86_pmu saves the architecture capabilities, which
	 * are available for all PMUs. The hybrid_pmu only includes the
	 * unique capabilities.
	 */
	int				num_hybrid_pmus;
	struct x86_hybrid_pmu		*hybrid_pmu;
	enum hybrid_cpu_type (*get_hybrid_cpu_type)	(void);
};

struct x86_perf_task_context_opt {
	int lbr_callstack_users;
	int lbr_stack_state;
	int log_id;
};

struct x86_perf_task_context {
	u64 lbr_sel;
	int tos;
	int valid_lbrs;
	struct x86_perf_task_context_opt opt;
	struct lbr_entry lbr[MAX_LBR_ENTRIES];
};

struct x86_perf_task_context_arch_lbr {
	struct x86_perf_task_context_opt opt;
	struct lbr_entry entries[];
};

/*
 * Add padding to guarantee the 64-byte alignment of the state buffer.
 *
 * The structure is dynamically allocated. The size of the LBR state may vary
 * based on the number of LBR registers.
 *
 * Do not put anything after the LBR state.
 */
struct x86_perf_task_context_arch_lbr_xsave {
	struct x86_perf_task_context_opt		opt;

	union {
		struct xregs_state			xsave;
		struct {
			struct fxregs_state		i387;
			struct xstate_header		header;
			struct arch_lbr_state		lbr;
		} __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
	};
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
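/*
 * Usage sketch (abridged from the Clovertown handling in
 * events/intel/core.c): quirks are registered from an __init path and
 * run once when the PMU is initialized:
 *
 *	static __init void intel_clovertown_quirk(void)
 *	{
 *		x86_pmu.pebs = 0;
 *		x86_pmu.pebs_constraints = NULL;
 *	}
 *	...
 *	x86_add_quirk(intel_clovertown_quirk);
 */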
/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA		0x20 /* deal with TSX force abort */
#define PMU_FL_PAIR		0x40 /* merge counters for large incr. events */
#define PMU_FL_INSTR_LATENCY	0x80 /* Support Instruction Latency in PEBS Memory Info Record */
#define PMU_FL_MEM_LOADS_AUX	0x100 /* Require an auxiliary event for the complete memory info */
#define PMU_FL_RETIRE_LATENCY	0x200 /* Support Retire Latency in PEBS */
#define PMU_FL_BR_CNTR		0x400 /* Support branch counter logging */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}

#define EVENT_ATTR_STR_HYBRID(_name, v, str, _pmu)			\
static struct perf_pmu_events_hybrid_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_hybrid_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str	= str,						\
	.pmu_type	= _pmu,						\
}

#define FORMAT_HYBRID_PTR(_id) (&format_attr_hybrid_##_id.attr.attr)

#define FORMAT_ATTR_HYBRID(_name, _pmu)					\
static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\
	.attr		= __ATTR_RO(_name),				\
	.pmu_type	= _pmu,						\
}

struct pmu *x86_get_pmu(unsigned int cpu);
extern struct x86_pmu x86_pmu __read_mostly;

DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
DECLARE_STATIC_CALL(x86_pmu_update,     *x86_pmu.update);

static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{
	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
		return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;

	return &((struct x86_perf_task_context *)ctx)->opt;
}
static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
DECLARE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}
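/*
 * Address calculation example (MSR numbers from the architectural
 * perfmon layout): without an addr_offset() callback the MSRs are
 * consecutive, so with x86_pmu.eventsel == MSR_ARCH_PERFMON_EVENTSEL0
 * (0x186) and x86_pmu.perfctr == MSR_ARCH_PERFMON_PERFCTR0 (0xc1):
 *
 *	x86_pmu_config_addr(2) == 0x188		- EVENTSEL2
 *	x86_pmu_event_addr(2)  == 0xc3		- PERFCTR2
 *
 * AMD parts with PERFCTR_CORE supply addr_offset() instead, because
 * their counter control/count MSRs are interleaved.
 */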
112327f6d22bSBorislav Petkov static inline int x86_pmu_rdpmc_index(int index)
112427f6d22bSBorislav Petkov {
112527f6d22bSBorislav Petkov return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
112627f6d22bSBorislav Petkov }
112727f6d22bSBorislav Petkov
1128fc4b8fcaSKan Liang bool check_hw_exists(struct pmu *pmu, int num_counters,
1129fc4b8fcaSKan Liang int num_counters_fixed);
1130fc4b8fcaSKan Liang
113127f6d22bSBorislav Petkov int x86_add_exclusive(unsigned int what);
113227f6d22bSBorislav Petkov
113327f6d22bSBorislav Petkov void x86_del_exclusive(unsigned int what);
113427f6d22bSBorislav Petkov
113527f6d22bSBorislav Petkov int x86_reserve_hardware(void);
113627f6d22bSBorislav Petkov
113727f6d22bSBorislav Petkov void x86_release_hardware(void);
113827f6d22bSBorislav Petkov
1139b00233b5SAndi Kleen int x86_pmu_max_precise(void);
1140b00233b5SAndi Kleen
114127f6d22bSBorislav Petkov void hw_perf_lbr_event_destroy(struct perf_event *event);
114227f6d22bSBorislav Petkov
114327f6d22bSBorislav Petkov int x86_setup_perfctr(struct perf_event *event);
114427f6d22bSBorislav Petkov
114527f6d22bSBorislav Petkov int x86_pmu_hw_config(struct perf_event *event);
114627f6d22bSBorislav Petkov
114727f6d22bSBorislav Petkov void x86_pmu_disable_all(void);
114827f6d22bSBorislav Petkov
1149ada54345SStephane Eranian static inline bool has_amd_brs(struct hw_perf_event *hwc)
1150ada54345SStephane Eranian {
1151ada54345SStephane Eranian return hwc->flags & PERF_X86_EVENT_AMD_BRS;
1152ada54345SStephane Eranian }
1153ada54345SStephane Eranian
115457388912SKim Phillips static inline bool is_counter_pair(struct hw_perf_event *hwc)
115557388912SKim Phillips {
115657388912SKim Phillips return hwc->flags & PERF_X86_EVENT_PAIR;
115757388912SKim Phillips }
115857388912SKim Phillips
115927f6d22bSBorislav Petkov static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
116027f6d22bSBorislav Petkov u64 enable_mask)
116127f6d22bSBorislav Petkov {
116227f6d22bSBorislav Petkov u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
116327f6d22bSBorislav Petkov
116427f6d22bSBorislav Petkov if (hwc->extra_reg.reg)
116527f6d22bSBorislav Petkov wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
116657388912SKim Phillips
116757388912SKim Phillips /*
116857388912SKim Phillips * Add the enabled Merge event on the next counter
116957388912SKim Phillips * if a large increment event is being enabled on this counter
117057388912SKim Phillips */
117157388912SKim Phillips if (is_counter_pair(hwc))
117257388912SKim Phillips wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
117357388912SKim Phillips
117427f6d22bSBorislav Petkov wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
117527f6d22bSBorislav Petkov }
117627f6d22bSBorislav Petkov
117727f6d22bSBorislav Petkov void x86_pmu_enable_all(int added);
117827f6d22bSBorislav Petkov
117927f6d22bSBorislav Petkov int perf_assign_events(struct event_constraint **constraints, int n,
118027f6d22bSBorislav Petkov int wmin, int wmax, int gpmax, int *assign);
118127f6d22bSBorislav Petkov int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
118227f6d22bSBorislav Petkov
118327f6d22bSBorislav Petkov void x86_pmu_stop(struct perf_event *event, int flags);
118427f6d22bSBorislav Petkov
118527f6d22bSBorislav Petkov static inline void x86_pmu_disable_event(struct perf_event *event)
118627f6d22bSBorislav Petkov {
1187df51fe7eSLike Xu u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
118827f6d22bSBorislav Petkov struct hw_perf_event *hwc = &event->hw;
118927f6d22bSBorislav Petkov
1190df51fe7eSLike Xu wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
119157388912SKim Phillips
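/*
 * If the event used a counter pair, also clear the Merge event that
 * __x86_pmu_enable_event() programmed on the next counter.
 */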
119257388912SKim Phillips if (is_counter_pair(hwc))
119357388912SKim Phillips wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
119427f6d22bSBorislav Petkov }
119527f6d22bSBorislav Petkov
119627f6d22bSBorislav Petkov void x86_pmu_enable_event(struct perf_event *event);
119727f6d22bSBorislav Petkov
119827f6d22bSBorislav Petkov int x86_pmu_handle_irq(struct pt_regs *regs);
119927f6d22bSBorislav Petkov
1200e11c1a7eSKan Liang void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
1201e11c1a7eSKan Liang u64 intel_ctrl);
1202e11c1a7eSKan Liang
120327f6d22bSBorislav Petkov extern struct event_constraint emptyconstraint;
120427f6d22bSBorislav Petkov
120527f6d22bSBorislav Petkov extern struct event_constraint unconstrained;
120627f6d22bSBorislav Petkov
120727f6d22bSBorislav Petkov static inline bool kernel_ip(unsigned long ip)
120827f6d22bSBorislav Petkov {
120927f6d22bSBorislav Petkov #ifdef CONFIG_X86_32
121027f6d22bSBorislav Petkov return ip > PAGE_OFFSET;
121127f6d22bSBorislav Petkov #else
121227f6d22bSBorislav Petkov return (long)ip < 0;
121327f6d22bSBorislav Petkov #endif
121427f6d22bSBorislav Petkov }
121527f6d22bSBorislav Petkov
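/*
 * Example: on 64-bit, kernel text addresses such as 0xffffffff81000000
 * have the sign bit set, so the (long)ip < 0 test above is a cheap
 * privilege check for a linear address.
 */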
121627f6d22bSBorislav Petkov /*
121727f6d22bSBorislav Petkov * Not all PMUs provide the right context information to place the reported IP
121827f6d22bSBorislav Petkov * into full context. Specifically segment registers are typically not
121927f6d22bSBorislav Petkov * supplied.
122027f6d22bSBorislav Petkov *
122127f6d22bSBorislav Petkov * Assuming the address is a linear address (it is for IBS), we fake the CS and
122227f6d22bSBorislav Petkov * vm86 mode using the known zero-based code segment and 'fix up' the registers
122327f6d22bSBorislav Petkov * to reflect this.
122427f6d22bSBorislav Petkov *
122527f6d22bSBorislav Petkov * Intel PEBS/LBR appear to typically provide the effective address, nothing
122627f6d22bSBorislav Petkov * much we can do about that but pray and treat it like a linear address.
122727f6d22bSBorislav Petkov */
122827f6d22bSBorislav Petkov static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
122927f6d22bSBorislav Petkov {
123027f6d22bSBorislav Petkov regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
123127f6d22bSBorislav Petkov if (regs->flags & X86_VM_MASK)
123227f6d22bSBorislav Petkov regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
123327f6d22bSBorislav Petkov regs->ip = ip;
123427f6d22bSBorislav Petkov }
123527f6d22bSBorislav Petkov
12364462fbfeSSandipan Das /*
12374462fbfeSSandipan Das * x86 control flow change classification
12384462fbfeSSandipan Das * x86 control flow changes include branches, interrupts, traps, faults
12394462fbfeSSandipan Das */
12404462fbfeSSandipan Das enum {
12414462fbfeSSandipan Das X86_BR_NONE = 0, /* unknown */
12424462fbfeSSandipan Das
12434462fbfeSSandipan Das X86_BR_USER = 1 << 0, /* branch target is user */
12444462fbfeSSandipan Das X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
12454462fbfeSSandipan Das
12464462fbfeSSandipan Das X86_BR_CALL = 1 << 2, /* call */
12474462fbfeSSandipan Das X86_BR_RET = 1 << 3, /* return */
12484462fbfeSSandipan Das X86_BR_SYSCALL = 1 << 4, /* syscall */
12494462fbfeSSandipan Das X86_BR_SYSRET = 1 << 5, /* syscall return */
12504462fbfeSSandipan Das X86_BR_INT = 1 << 6, /* sw interrupt */
12514462fbfeSSandipan Das X86_BR_IRET = 1 << 7, /* return from interrupt */
12524462fbfeSSandipan Das X86_BR_JCC = 1 << 8, /* conditional */
12534462fbfeSSandipan Das X86_BR_JMP = 1 << 9, /* jump */
12544462fbfeSSandipan Das X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
12554462fbfeSSandipan Das X86_BR_IND_CALL = 1 << 11,/* indirect calls */
12564462fbfeSSandipan Das X86_BR_ABORT = 1 << 12,/* transaction abort */
12574462fbfeSSandipan Das X86_BR_IN_TX = 1 << 13,/* in transaction */
12584462fbfeSSandipan Das X86_BR_NO_TX = 1 << 14,/* not in transaction */
12594462fbfeSSandipan Das X86_BR_ZERO_CALL = 1 << 15,/* zero length call */
12604462fbfeSSandipan Das X86_BR_CALL_STACK = 1 << 16,/* call stack */
12614462fbfeSSandipan Das X86_BR_IND_JMP = 1 << 17,/* indirect jump */
12624462fbfeSSandipan Das
12634462fbfeSSandipan Das X86_BR_TYPE_SAVE = 1 << 18,/* indicate to save branch type */
12644462fbfeSSandipan Das
12654462fbfeSSandipan Das };
12664462fbfeSSandipan Das
12674462fbfeSSandipan Das #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
12684462fbfeSSandipan Das #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
12694462fbfeSSandipan Das
12704462fbfeSSandipan Das #define X86_BR_ANY \
12714462fbfeSSandipan Das (X86_BR_CALL |\
12724462fbfeSSandipan Das X86_BR_RET |\
12734462fbfeSSandipan Das X86_BR_SYSCALL |\
12744462fbfeSSandipan Das X86_BR_SYSRET |\
12754462fbfeSSandipan Das X86_BR_INT |\
12764462fbfeSSandipan Das X86_BR_IRET |\
12774462fbfeSSandipan Das X86_BR_JCC |\
12784462fbfeSSandipan Das X86_BR_JMP |\
12794462fbfeSSandipan Das X86_BR_IRQ |\
12804462fbfeSSandipan Das X86_BR_ABORT |\
12814462fbfeSSandipan Das X86_BR_IND_CALL |\
12824462fbfeSSandipan Das X86_BR_IND_JMP |\
12834462fbfeSSandipan Das X86_BR_ZERO_CALL)
12844462fbfeSSandipan Das
12854462fbfeSSandipan Das #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
12864462fbfeSSandipan Das
12874462fbfeSSandipan Das #define X86_BR_ANY_CALL \
12884462fbfeSSandipan Das (X86_BR_CALL |\
12894462fbfeSSandipan Das X86_BR_IND_CALL |\
12904462fbfeSSandipan Das X86_BR_ZERO_CALL |\
12914462fbfeSSandipan Das X86_BR_SYSCALL |\
12924462fbfeSSandipan Das X86_BR_IRQ |\
12934462fbfeSSandipan Das X86_BR_INT)
12944462fbfeSSandipan Das
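/*
 * Example: a direct CALL executed in user mode is classified as
 * X86_BR_CALL | X86_BR_USER, which matches both X86_BR_ANY and
 * X86_BR_ANY_CALL; common_branch_type() below folds such X86_BR_* bits
 * into the generic PERF_BR_* encoding (PERF_BR_CALL in this case).
 */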
12954462fbfeSSandipan Das int common_branch_type(int type);
12964462fbfeSSandipan Das int branch_type(unsigned long from, unsigned long to, int abort);
1297df3e9612SSandipan Das int branch_type_fused(unsigned long from, unsigned long to, int abort,
1298df3e9612SSandipan Das int *offset);
12994462fbfeSSandipan Das
130027f6d22bSBorislav Petkov ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
130127f6d22bSBorislav Petkov ssize_t intel_event_sysfs_show(char *page, u64 config);
130227f6d22bSBorislav Petkov
1303a49ac9f8SHuang Rui ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
1304a49ac9f8SHuang Rui char *page);
1305fc07e9f9SAndi Kleen ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
1306fc07e9f9SAndi Kleen char *page);
1307a9c81ccdSKan Liang ssize_t events_hybrid_sysfs_show(struct device *dev,
1308a9c81ccdSKan Liang struct device_attribute *attr,
1309a9c81ccdSKan Liang char *page);
1310a49ac9f8SHuang Rui
1311fc4b8fcaSKan Liang static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
131232451614SKan Liang {
1313fc4b8fcaSKan Liang u64 intel_ctrl = hybrid(pmu, intel_ctrl);
1314fc4b8fcaSKan Liang
1315fc4b8fcaSKan Liang return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
131632451614SKan Liang }
131732451614SKan Liang
131827f6d22bSBorislav Petkov #ifdef CONFIG_CPU_SUP_AMD
131927f6d22bSBorislav Petkov
132027f6d22bSBorislav Petkov int amd_pmu_init(void);
1321cc37e520SStephane Eranian
1322703fb765SSandipan Das int amd_pmu_lbr_init(void);
1323ca5b7c0dSSandipan Das void amd_pmu_lbr_reset(void);
1324ca5b7c0dSSandipan Das void amd_pmu_lbr_read(void);
1325ca5b7c0dSSandipan Das void amd_pmu_lbr_add(struct perf_event *event);
1326ca5b7c0dSSandipan Das void amd_pmu_lbr_del(struct perf_event *event);
1327bd275681SPeter Zijlstra void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
1328ca5b7c0dSSandipan Das void amd_pmu_lbr_enable_all(void);
1329ca5b7c0dSSandipan Das void amd_pmu_lbr_disable_all(void);
1330ca5b7c0dSSandipan Das int amd_pmu_lbr_hw_config(struct perf_event *event);
1331703fb765SSandipan Das
1332*1eddf187SAndrii Nakryiko static __always_inline void __amd_pmu_lbr_disable(void)
1333*1eddf187SAndrii Nakryiko {
1334*1eddf187SAndrii Nakryiko u64 dbg_ctl, dbg_extn_cfg;
1335*1eddf187SAndrii Nakryiko
1336*1eddf187SAndrii Nakryiko rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
1337*1eddf187SAndrii Nakryiko wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
1338*1eddf187SAndrii Nakryiko
1339*1eddf187SAndrii Nakryiko if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
1340*1eddf187SAndrii Nakryiko rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
1341*1eddf187SAndrii Nakryiko wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
1342*1eddf187SAndrii Nakryiko }
1343*1eddf187SAndrii Nakryiko }
1344*1eddf187SAndrii Nakryiko
1345cc37e520SStephane Eranian #ifdef CONFIG_PERF_EVENTS_AMD_BRS
1346b40d0156SSandipan Das
1347b40d0156SSandipan Das #define AMD_FAM19H_BRS_EVENT 0xc4 /* RETIRED_TAKEN_BRANCH_INSTRUCTIONS */
1348b40d0156SSandipan Das
1349ada54345SStephane Eranian int amd_brs_init(void);
1350ada54345SStephane Eranian void amd_brs_disable(void);
1351ada54345SStephane Eranian void amd_brs_enable(void);
1352ada54345SStephane Eranian void amd_brs_enable_all(void);
1353ada54345SStephane Eranian void amd_brs_disable_all(void);
1354ada54345SStephane Eranian void amd_brs_drain(void);
1355d5616bacSStephane Eranian void amd_brs_lopwr_init(void);
1356b40d0156SSandipan Das int amd_brs_hw_config(struct perf_event *event);
1357ada54345SStephane Eranian void amd_brs_reset(void);
1358ada54345SStephane Eranian
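/*
 * ->add() side of the BRS bookkeeping: take a sched-callback reference
 * and count the LBR user; the buffer itself is only reset by brs_enable().
 */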
1359ada54345SStephane Eranian static inline void amd_pmu_brs_add(struct perf_event *event)
1360ada54345SStephane Eranian {
1361ada54345SStephane Eranian struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1362ada54345SStephane Eranian
1363bd275681SPeter Zijlstra perf_sched_cb_inc(event->pmu);
1364ada54345SStephane Eranian cpuc->lbr_users++;
1365ada54345SStephane Eranian /*
1366ada54345SStephane Eranian * No need to reset BRS because it is reset
1367ada54345SStephane Eranian * on brs_enable() and it is saturating
1368ada54345SStephane Eranian */
1369ada54345SStephane Eranian }
1370ada54345SStephane Eranian
1371ada54345SStephane Eranian static inline void amd_pmu_brs_del(struct perf_event *event)
1372ada54345SStephane Eranian {
1373ada54345SStephane Eranian struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1374ada54345SStephane Eranian
1375ada54345SStephane Eranian cpuc->lbr_users--;
1376ada54345SStephane Eranian WARN_ON_ONCE(cpuc->lbr_users < 0);
1377ada54345SStephane Eranian
1378bd275681SPeter Zijlstra perf_sched_cb_dec(event->pmu);
1379ada54345SStephane Eranian }
1380ada54345SStephane Eranian
1381bd275681SPeter Zijlstra void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
1382cc37e520SStephane Eranian #else
1383cc37e520SStephane Eranian static inline int amd_brs_init(void)
1384cc37e520SStephane Eranian {
1385cc37e520SStephane Eranian return 0;
1386cc37e520SStephane Eranian }
1387cc37e520SStephane Eranian static inline void amd_brs_disable(void) {}
1388cc37e520SStephane Eranian static inline void amd_brs_enable(void) {}
1389cc37e520SStephane Eranian static inline void amd_brs_drain(void) {}
1390cc37e520SStephane Eranian static inline void amd_brs_lopwr_init(void) {}
1391cc37e520SStephane Eranian static inline void amd_brs_disable_all(void) {}
1392b40d0156SSandipan Das static inline int amd_brs_hw_config(struct perf_event *event)
1393cc37e520SStephane Eranian {
1394cc37e520SStephane Eranian return 0;
1395cc37e520SStephane Eranian }
1396cc37e520SStephane Eranian static inline void amd_brs_reset(void) {}
1397cc37e520SStephane Eranian
1398cc37e520SStephane Eranian static inline void amd_pmu_brs_add(struct perf_event *event)
1399cc37e520SStephane Eranian {
1400cc37e520SStephane Eranian }
1401cc37e520SStephane Eranian
1402cc37e520SStephane Eranian static inline void amd_pmu_brs_del(struct perf_event *event)
1403cc37e520SStephane Eranian {
1404cc37e520SStephane Eranian }
1405cc37e520SStephane Eranian
1406bd275681SPeter Zijlstra static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
1407cc37e520SStephane Eranian {
1408cc37e520SStephane Eranian }
1409cc37e520SStephane Eranian
1410cc37e520SStephane Eranian static inline void amd_brs_enable_all(void)
1411cc37e520SStephane Eranian {
1412cc37e520SStephane Eranian }
1413cc37e520SStephane Eranian
1414cc37e520SStephane Eranian #endif
1415ba2fe750SStephane Eranian
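/*
 * Without CONFIG_CPU_SUP_AMD the helpers below become empty stubs (or
 * return an error) so that common code can call them unconditionally.
 */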
141627f6d22bSBorislav Petkov #else /* CONFIG_CPU_SUP_AMD */
141727f6d22bSBorislav Petkov
141827f6d22bSBorislav Petkov static inline int amd_pmu_init(void)
141927f6d22bSBorislav Petkov {
142027f6d22bSBorislav Petkov return 0;
142127f6d22bSBorislav Petkov }
142227f6d22bSBorislav Petkov
1423ada54345SStephane Eranian static inline int amd_brs_init(void)
1424ada54345SStephane Eranian {
1425ada54345SStephane Eranian return -EOPNOTSUPP;
1426ada54345SStephane Eranian }
1427ada54345SStephane Eranian
1428ada54345SStephane Eranian static inline void amd_brs_drain(void)
1429ada54345SStephane Eranian {
1430ada54345SStephane Eranian }
1431ada54345SStephane Eranian
1432ada54345SStephane Eranian static inline void amd_brs_enable_all(void)
1433ada54345SStephane Eranian {
1434ada54345SStephane Eranian }
1435ada54345SStephane Eranian
1436ada54345SStephane Eranian static inline void amd_brs_disable_all(void)
1437ada54345SStephane Eranian {
1438ada54345SStephane Eranian }
143927f6d22bSBorislav Petkov #endif /* CONFIG_CPU_SUP_AMD */
144027f6d22bSBorislav Petkov
144142880f72SAlexander Shishkin static inline int is_pebs_pt(struct perf_event *event)
144242880f72SAlexander Shishkin {
144342880f72SAlexander Shishkin return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
144442880f72SAlexander Shishkin }
144542880f72SAlexander Shishkin
144627f6d22bSBorislav Petkov #ifdef CONFIG_CPU_SUP_INTEL
144727f6d22bSBorislav Petkov
144881ec3f3cSJiri Olsa static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
144927f6d22bSBorislav Petkov {
145067266c10SJiri Olsa struct hw_perf_event *hwc = &event->hw;
145167266c10SJiri Olsa unsigned int hw_event, bts_event;
145227f6d22bSBorislav Petkov
145367266c10SJiri Olsa if (event->attr.freq)
145427f6d22bSBorislav Petkov return false;
145567266c10SJiri Olsa
145667266c10SJiri Olsa hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
145767266c10SJiri Olsa bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
145867266c10SJiri Olsa
145981ec3f3cSJiri Olsa return hw_event == bts_event && period == 1;
146081ec3f3cSJiri Olsa }
146181ec3f3cSJiri Olsa
146281ec3f3cSJiri Olsa static inline bool intel_pmu_has_bts(struct perf_event *event)
146381ec3f3cSJiri Olsa {
146481ec3f3cSJiri Olsa struct hw_perf_event *hwc = &event->hw;
146581ec3f3cSJiri Olsa
146681ec3f3cSJiri Olsa return intel_pmu_has_bts_period(event, hwc->sample_period);
146727f6d22bSBorislav Petkov }
146827f6d22bSBorislav Petkov
1469c22ac2a3SSong Liu static __always_inline void __intel_pmu_pebs_disable_all(void)
1470c22ac2a3SSong Liu {
1471c22ac2a3SSong Liu wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1472c22ac2a3SSong Liu }
1473c22ac2a3SSong Liu
1474c22ac2a3SSong Liu static __always_inline void __intel_pmu_arch_lbr_disable(void)
1475c22ac2a3SSong Liu {
1476c22ac2a3SSong Liu wrmsrl(MSR_ARCH_LBR_CTL, 0);
1477c22ac2a3SSong Liu }
1478c22ac2a3SSong Liu
1479c22ac2a3SSong Liu static __always_inline void __intel_pmu_lbr_disable(void)
1480c22ac2a3SSong Liu {
1481c22ac2a3SSong Liu u64 debugctl;
1482c22ac2a3SSong Liu
1483c22ac2a3SSong Liu rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
1484c22ac2a3SSong Liu debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
1485c22ac2a3SSong Liu wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
1486c22ac2a3SSong Liu }
1487c22ac2a3SSong Liu
148827f6d22bSBorislav Petkov int intel_pmu_save_and_restart(struct perf_event *event);
148927f6d22bSBorislav Petkov
149027f6d22bSBorislav Petkov struct event_constraint *
149127f6d22bSBorislav Petkov x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
149227f6d22bSBorislav Petkov struct perf_event *event);
149327f6d22bSBorislav Petkov
1494d01b1f96SPeter Zijlstra (Intel) extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
1495d01b1f96SPeter Zijlstra (Intel) extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
149627f6d22bSBorislav Petkov
149727f6d22bSBorislav Petkov int intel_pmu_init(void);
149827f6d22bSBorislav Petkov
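/*
 * Debug Store (DS) management for BTS/PEBS: reserve_ds_buffers() is
 * reached via x86_reserve_hardware() and sizes the per-CPU DS area;
 * release_ds_buffers() undoes it when the last event is gone.
 */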
149927f6d22bSBorislav Petkov void init_debug_store_on_cpu(int cpu);
150027f6d22bSBorislav Petkov
150127f6d22bSBorislav Petkov void fini_debug_store_on_cpu(int cpu);
150227f6d22bSBorislav Petkov
150327f6d22bSBorislav Petkov void release_ds_buffers(void);
150427f6d22bSBorislav Petkov
150527f6d22bSBorislav Petkov void reserve_ds_buffers(void);
150627f6d22bSBorislav Petkov
1507c085fb87SKan Liang void release_lbr_buffers(void);
1508c085fb87SKan Liang
1509488e13a4SLike Xu void reserve_lbr_buffers(void);
1510488e13a4SLike Xu
151127f6d22bSBorislav Petkov extern struct event_constraint bts_constraint;
1512097e4311SLike Xu extern struct event_constraint vlbr_constraint;
151327f6d22bSBorislav Petkov
151427f6d22bSBorislav Petkov void intel_pmu_enable_bts(u64 config);
151527f6d22bSBorislav Petkov
151627f6d22bSBorislav Petkov void intel_pmu_disable_bts(void);
151727f6d22bSBorislav Petkov
151827f6d22bSBorislav Petkov int intel_pmu_drain_bts_buffer(void);
151927f6d22bSBorislav Petkov
152039a41278SKan Liang u64 adl_latency_data_small(struct perf_event *event, u64 status);
152139a41278SKan Liang
152238aaf921SKan Liang u64 mtl_latency_data_small(struct perf_event *event, u64 status);
152338aaf921SKan Liang
152427f6d22bSBorislav Petkov extern struct event_constraint intel_core2_pebs_event_constraints[];
152527f6d22bSBorislav Petkov
152627f6d22bSBorislav Petkov extern struct event_constraint intel_atom_pebs_event_constraints[];
152727f6d22bSBorislav Petkov
152827f6d22bSBorislav Petkov extern struct event_constraint intel_slm_pebs_event_constraints[];
152927f6d22bSBorislav Petkov
15308b92c3a7SKan Liang extern struct event_constraint intel_glm_pebs_event_constraints[];
15318b92c3a7SKan Liang
1532dd0b06b5SKan Liang extern struct event_constraint intel_glp_pebs_event_constraints[];
1533dd0b06b5SKan Liang
1534f83d2f91SKan Liang extern struct event_constraint intel_grt_pebs_event_constraints[];
1535f83d2f91SKan Liang
153627f6d22bSBorislav Petkov extern struct event_constraint intel_nehalem_pebs_event_constraints[];
153727f6d22bSBorislav Petkov
153827f6d22bSBorislav Petkov extern struct event_constraint intel_westmere_pebs_event_constraints[];
153927f6d22bSBorislav Petkov
154027f6d22bSBorislav Petkov extern struct event_constraint intel_snb_pebs_event_constraints[];
154127f6d22bSBorislav Petkov
154227f6d22bSBorislav Petkov extern struct event_constraint intel_ivb_pebs_event_constraints[];
154327f6d22bSBorislav Petkov
154427f6d22bSBorislav Petkov extern struct event_constraint intel_hsw_pebs_event_constraints[];
154527f6d22bSBorislav Petkov
1546b3e62463SStephane Eranian extern struct event_constraint intel_bdw_pebs_event_constraints[];
1547b3e62463SStephane Eranian
154827f6d22bSBorislav Petkov extern struct event_constraint intel_skl_pebs_event_constraints[];
154927f6d22bSBorislav Petkov
155060176089SKan Liang extern struct event_constraint intel_icl_pebs_event_constraints[];
155160176089SKan Liang
1552d4b5694cSKan Liang extern struct event_constraint intel_glc_pebs_event_constraints[];
155361b985e3SKan Liang
155427f6d22bSBorislav Petkov struct event_constraint *intel_pebs_constraints(struct perf_event *event);
155527f6d22bSBorislav Petkov
155668f7082fSPeter Zijlstra void intel_pmu_pebs_add(struct perf_event *event);
155768f7082fSPeter Zijlstra
155868f7082fSPeter Zijlstra void intel_pmu_pebs_del(struct perf_event *event);
155968f7082fSPeter Zijlstra
156027f6d22bSBorislav Petkov void intel_pmu_pebs_enable(struct perf_event *event);
156127f6d22bSBorislav Petkov
156227f6d22bSBorislav Petkov void intel_pmu_pebs_disable(struct perf_event *event);
156327f6d22bSBorislav Petkov
156427f6d22bSBorislav Petkov void intel_pmu_pebs_enable_all(void);
156527f6d22bSBorislav Petkov
156627f6d22bSBorislav Petkov void intel_pmu_pebs_disable_all(void);
156727f6d22bSBorislav Petkov
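/*
 * With multi-entry PEBS the DS buffer is drained on context switch so
 * that buffered records are not attributed to the wrong task.
 */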
1568bd275681SPeter Zijlstra void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
156927f6d22bSBorislav Petkov
15705bee2cc6SKan Liang void intel_pmu_auto_reload_read(struct perf_event *event);
15715bee2cc6SKan Liang
15725624986dSKan Liang void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
1573c22497f5SKan Liang
157427f6d22bSBorislav Petkov void intel_ds_init(void);
157527f6d22bSBorislav Petkov
157633744916SKan Liang void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
157733744916SKan Liang struct cpu_hw_events *cpuc,
157833744916SKan Liang struct perf_event *event);
157933744916SKan Liang
1580bd275681SPeter Zijlstra void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
1581bd275681SPeter Zijlstra struct perf_event_pmu_context *next_epc);
1582421ca868SAlexey Budankov
1583bd275681SPeter Zijlstra void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
158427f6d22bSBorislav Petkov
158519fc9dddSDavid Carrillo-Cisneros u64 lbr_from_signext_quirk_wr(u64 val);
158619fc9dddSDavid Carrillo-Cisneros
158727f6d22bSBorislav Petkov void intel_pmu_lbr_reset(void);
158827f6d22bSBorislav Petkov
15899f354a72SKan Liang void intel_pmu_lbr_reset_32(void);
15909f354a72SKan Liang
15919f354a72SKan Liang void intel_pmu_lbr_reset_64(void);
15929f354a72SKan Liang
159368f7082fSPeter Zijlstra void intel_pmu_lbr_add(struct perf_event *event);
159427f6d22bSBorislav Petkov
159568f7082fSPeter Zijlstra void intel_pmu_lbr_del(struct perf_event *event);
159627f6d22bSBorislav Petkov
159727f6d22bSBorislav Petkov void intel_pmu_lbr_enable_all(bool pmi);
159827f6d22bSBorislav Petkov
159927f6d22bSBorislav Petkov void intel_pmu_lbr_disable_all(void);
160027f6d22bSBorislav Petkov
160127f6d22bSBorislav Petkov void intel_pmu_lbr_read(void);
160227f6d22bSBorislav Petkov
1603c301b1d8SKan Liang void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);
1604c301b1d8SKan Liang
1605c301b1d8SKan Liang void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);
1606c301b1d8SKan Liang
1607799571bfSKan Liang void intel_pmu_lbr_save(void *ctx);
1608799571bfSKan Liang
1609799571bfSKan Liang void intel_pmu_lbr_restore(void *ctx);
1610799571bfSKan Liang
161127f6d22bSBorislav Petkov void intel_pmu_lbr_init_core(void);
161227f6d22bSBorislav Petkov
161327f6d22bSBorislav Petkov void intel_pmu_lbr_init_nhm(void);
161427f6d22bSBorislav Petkov
161527f6d22bSBorislav Petkov void intel_pmu_lbr_init_atom(void);
161627f6d22bSBorislav Petkov
1617f21d5adcSKan Liang void intel_pmu_lbr_init_slm(void);
1618f21d5adcSKan Liang
161927f6d22bSBorislav Petkov void intel_pmu_lbr_init_snb(void);
162027f6d22bSBorislav Petkov
162127f6d22bSBorislav Petkov void intel_pmu_lbr_init_hsw(void);
162227f6d22bSBorislav Petkov
162327f6d22bSBorislav Petkov void intel_pmu_lbr_init_skl(void);
162427f6d22bSBorislav Petkov
162527f6d22bSBorislav Petkov void intel_pmu_lbr_init_knl(void);
162627f6d22bSBorislav Petkov
16271ac7fd81SPeter Zijlstra (Intel) void intel_pmu_lbr_init(void);
16281ac7fd81SPeter Zijlstra (Intel)
162947125db2SKan Liang void intel_pmu_arch_lbr_init(void);
163047125db2SKan Liang
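/*
 * Model-specific initializers for the PEBS memory data-source decode
 * table; the hardware encodings differ between these parts.
 */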
1631e17dc653SAndi Kleen void intel_pmu_pebs_data_source_nhm(void);
1632e17dc653SAndi Kleen
16336ae5fa61SAndi Kleen void intel_pmu_pebs_data_source_skl(bool pmem);
16346ae5fa61SAndi Kleen
1635ccf170e9SKan Liang void intel_pmu_pebs_data_source_adl(void);
1636ccf170e9SKan Liang
163724919fdeSKan Liang void intel_pmu_pebs_data_source_grt(void);
163824919fdeSKan Liang
163938aaf921SKan Liang void intel_pmu_pebs_data_source_mtl(void);
164038aaf921SKan Liang
1641a430021fSKan Liang void intel_pmu_pebs_data_source_cmt(void);
1642a430021fSKan Liang
164327f6d22bSBorislav Petkov int intel_pmu_setup_lbr_filter(struct perf_event *event);
164427f6d22bSBorislav Petkov
164527f6d22bSBorislav Petkov void intel_pt_interrupt(void);
164627f6d22bSBorislav Petkov
164727f6d22bSBorislav Petkov int intel_bts_interrupt(void);
164827f6d22bSBorislav Petkov
164927f6d22bSBorislav Petkov void intel_bts_enable_local(void);
165027f6d22bSBorislav Petkov
165127f6d22bSBorislav Petkov void intel_bts_disable_local(void);
165227f6d22bSBorislav Petkov
165327f6d22bSBorislav Petkov int p4_pmu_init(void);
165427f6d22bSBorislav Petkov
165527f6d22bSBorislav Petkov int p6_pmu_init(void);
165627f6d22bSBorislav Petkov
165727f6d22bSBorislav Petkov int knc_pmu_init(void);
165827f6d22bSBorislav Petkov
165927f6d22bSBorislav Petkov static inline int is_ht_workaround_enabled(void)
166027f6d22bSBorislav Petkov {
166127f6d22bSBorislav Petkov return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
166227f6d22bSBorislav Petkov }
166327f6d22bSBorislav Petkov
166427f6d22bSBorislav Petkov #else /* CONFIG_CPU_SUP_INTEL */
166527f6d22bSBorislav Petkov
166627f6d22bSBorislav Petkov static inline void reserve_ds_buffers(void)
166727f6d22bSBorislav Petkov {
166827f6d22bSBorislav Petkov }
166927f6d22bSBorislav Petkov
167027f6d22bSBorislav Petkov static inline void release_ds_buffers(void)
167127f6d22bSBorislav Petkov {
167227f6d22bSBorislav Petkov }
167327f6d22bSBorislav Petkov
1674c085fb87SKan Liang static inline void release_lbr_buffers(void)
1675c085fb87SKan Liang {
1676c085fb87SKan Liang }
1677c085fb87SKan Liang
1678488e13a4SLike Xu static inline void reserve_lbr_buffers(void)
1679488e13a4SLike Xu {
1680488e13a4SLike Xu }
1681488e13a4SLike Xu
168227f6d22bSBorislav Petkov static inline int intel_pmu_init(void)
168327f6d22bSBorislav Petkov {
168427f6d22bSBorislav Petkov return 0;
168527f6d22bSBorislav Petkov }
168627f6d22bSBorislav Petkov
1687f764c58bSPeter Zijlstra static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
168827f6d22bSBorislav Petkov {
1689d01b1f96SPeter Zijlstra (Intel) return 0;
1690d01b1f96SPeter Zijlstra (Intel) }
1691d01b1f96SPeter Zijlstra (Intel)
1692f764c58bSPeter Zijlstra static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
1693d01b1f96SPeter Zijlstra (Intel) {
169427f6d22bSBorislav Petkov }
169527f6d22bSBorislav Petkov
169627f6d22bSBorislav Petkov static inline int is_ht_workaround_enabled(void)
169727f6d22bSBorislav Petkov {
169827f6d22bSBorislav Petkov return 0;
169927f6d22bSBorislav Petkov }
170027f6d22bSBorislav Petkov #endif /* CONFIG_CPU_SUP_INTEL */
17013a4ac121SCodyYao-oc
17023a4ac121SCodyYao-oc #if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
17033a4ac121SCodyYao-oc int zhaoxin_pmu_init(void);
17043a4ac121SCodyYao-oc #else
17053a4ac121SCodyYao-oc static inline int zhaoxin_pmu_init(void)
17063a4ac121SCodyYao-oc {
17073a4ac121SCodyYao-oc return 0;
17083a4ac121SCodyYao-oc }
17093a4ac121SCodyYao-oc #endif /* CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN */
1710
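/*
 * Usage sketch (illustrative only, not part of the kernel sources; the
 * "my-event" name and event string below are hypothetical): a PMU driver
 * defines sysfs event aliases with EVENT_ATTR_STR() and exports them
 * through an attribute array:
 *
 *	EVENT_ATTR_STR(my-event, my_event, "event=0xc2,umask=0x02");
 *
 *	static struct attribute *my_events_attrs[] = {
 *		EVENT_PTR(my_event),
 *		NULL,
 *	};
 */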