/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/fpu/xstate.h>
#include <asm/intel_ds.h>
#include <asm/cpu.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slots in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE		= -1, /* not used */

	EXTRA_REG_RSP_0		= 0,  /* offcore_response_0 */
	EXTRA_REG_RSP_1		= 1,  /* offcore_response_1 */
	EXTRA_REG_LBR		= 2,  /* lbr_select */
	EXTRA_REG_LDLAT		= 3,  /* ld_lat_threshold */
	EXTRA_REG_FE		= 4,  /* fe_* */

	EXTRA_REG_MAX		      /* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64		code;
	u64		cmask;
	int		weight;
	int		overlap;
	int		flags;
	unsigned int	size;
};

static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}
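
/*
 * Illustrative sketch (not used by the driver itself): the unsigned
 * subtraction above is a branchless range check. With a hypothetical
 * range constraint of .code = 0xc0, .size = 0x2 and
 * .cmask = ARCH_PERFMON_EVENTSEL_EVENT, the masked event codes
 * 0xc0..0xc2 match, while anything below 0xc0 wraps to a huge value:
 *
 *	constraint_match(&c, 0xc1);	// true:  0xc1 - 0xc0 = 1 <= 2
 *	constraint_match(&c, 0xbf);	// false: 0xbf - 0xc0 wraps to ~0
 */
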
#define PERF_ARCH(name, val)	\
	PERF_X86_EVENT_##name = val,

/*
 * struct hw_perf_event.flags flags
 */
enum {
#include "perf_event_flags.h"
};

#undef PERF_ARCH

#define PERF_ARCH(name, val)	\
	static_assert((PERF_X86_EVENT_##name & PERF_EVENT_FLAG_ARCH) == \
		      PERF_X86_EVENT_##name);

#include "perf_event_flags.h"

#undef PERF_ARCH

static inline bool is_topdown_count(struct perf_event *event)
{
	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
}

static inline bool is_metric_event(struct perf_event *event)
{
	u64 config = event->attr.config;

	return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
		((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) &&
		((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
}

static inline bool is_slots_event(struct perf_event *event)
{
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
}

static inline bool is_topdown_event(struct perf_event *event)
{
	return is_metric_event(event) || is_slots_event(event);
}

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
#define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
#define PEBS_OUTPUT_OFFSET	61
#define PEBS_OUTPUT_MASK	(3ull << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT		(1ull << PEBS_OUTPUT_OFFSET)
#define PEBS_VIA_PT_MASK	(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 */
#define LARGE_PEBS_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
	PERF_SAMPLE_PERIOD | PERF_SAMPLE_CODE_PAGE_SIZE | \
	PERF_SAMPLE_WEIGHT_TYPE)

#define PEBS_GP_REGS			\
	((1ULL << PERF_REG_X86_AX)    | \
	 (1ULL << PERF_REG_X86_BX)    | \
	 (1ULL << PERF_REG_X86_CX)    | \
	 (1ULL << PERF_REG_X86_DX)    | \
	 (1ULL << PERF_REG_X86_DI)    | \
	 (1ULL << PERF_REG_X86_SI)    | \
	 (1ULL << PERF_REG_X86_SP)    | \
	 (1ULL << PERF_REG_X86_BP)    | \
	 (1ULL << PERF_REG_X86_IP)    | \
	 (1ULL << PERF_REG_X86_FLAGS) | \
	 (1ULL << PERF_REG_X86_R8)    | \
	 (1ULL << PERF_REG_X86_R9)    | \
	 (1ULL << PERF_REG_X86_R10)   | \
	 (1ULL << PERF_REG_X86_R11)   | \
	 (1ULL << PERF_REG_X86_R12)   | \
	 (1ULL << PERF_REG_X86_R13)   | \
	 (1ULL << PERF_REG_X86_R14)   | \
	 (1ULL << PERF_REG_X86_R15))

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED = 0,    /* counter is unused */
	INTEL_EXCL_SHARED = 1,    /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES		32

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_INFO2	= 0x07,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_INFO2,
};

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			n_txn_pair;
	int			n_txn_metric;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	void			*ds_pebs_vaddr;
	void			*ds_bts_vaddr;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;
	int			n_pebs_via_pt;
	int			pebs_output;

	/* Current super set of events hardware configuration */
	u64			pebs_data_cfg;
	u64			active_pebs_data_cfg;
	int			pebs_record_size;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	int				lbr_pebs_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	union {
		struct er_account	*lbr_sel;
		struct er_account	*lbr_ctl;
	};
	u64				br_sel;
	void				*last_task_ctx;
	int				last_log_id;
	int				lbr_select;
	void				*lbr_xsave;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint	*constraint_list; /* in enable order */
	struct intel_excl_cntrs	*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * SKL TSX_FORCE_ABORT shadow
	 */
	u64				tfa_shadow;

	/*
	 * Perf Metrics
	 */
	/* number of accepted metrics events */
	int				n_metric;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	int				brs_active; /* BRS is enabled */

	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;
	int				n_pair; /* Large increment events */

	void				*kfree_on_online[X86_PERF_KFREE_MAX];

	struct pmu			*pmu;
};

#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.size = (e) - (c),		\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)
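
/*
 * Illustrative sketch: the first argument is the event code to match,
 * the second the bitmask of counters the event may use. A hypothetical
 * constraint tying event 0xc0 to general purpose counters 0 and 1
 * would read:
 *
 *	static struct event_constraint c =
 *		EVENT_CONSTRAINT(0xc0, 0x3, ARCH_PERFMON_EVENTSEL_EVENT);
 *
 * 0x3 lands in idxmsk64, and its weight (HWEIGHT(0x3) == 2) is what
 * the scheduler uses to order constraints.
 */
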
/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
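
/*
 * Illustrative sketch: the fixed counter index is biased by 32 so the
 * constraint bit lands in the fixed-counter region of the idx bitmap
 * (INTEL_PMC_IDX_FIXED == 32). E.g. the Intel core drivers pin
 * INST_RETIRED.ANY to fixed counter 0 with:
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 */
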
/*
 * The special metric counters do not actually exist. They are calculated from
 * the combination of the FxCtr3 + MSR_PERF_METRICS.
 *
 * The special metric counters are mapped to a dummy offset for the scheduler.
 * Sharing the same metric among multiple users without multiplexing is not
 * allowed, even though the hardware supports that in principle.
 */

#define METRIC_EVENT_CONSTRAINT(c, n)					\
	EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)),	\
			 INTEL_ARCH_EVENT_MASK)
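
/*
 * Illustrative sketch: the topdown metric events are declared with
 * their metric sub-event code and their slot above
 * INTEL_PMC_IDX_METRIC_BASE, e.g. the retiring metric in the Intel
 * core driver:
 *
 *	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0)
 */
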
/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PSD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_STLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

#define INTEL_HYBRID_LAT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
	__EVENT_CONSTRAINT_RANGE(code, end, n,		\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
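
/*
 * Illustrative sketch: constraint tables are scanned with the iterator
 * above until the EVENT_CONSTRAINT_END marker, e.g. (mirroring
 * x86_get_event_constraints()):
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if (constraint_match(c, event->hw.config))
 *			return c;
 *	}
 */
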
/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
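
/*
 * Illustrative sketch: the offcore response facility is the classic
 * user. The Intel core driver describes OFFCORE_RESPONSE_0 (event
 * 0x01b7) roughly as
 *
 *	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0)
 *
 * so the event's config1 value (up to the valid mask, 0xffff here) is
 * written to MSR_OFFCORE_RSP_0 when the event is programmed.
 */
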
union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
		u64	pebs_baseline:1;
		u64	perf_metrics:1;
		u64	pebs_output_pt_available:1;
		u64	anythread_deprecated:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
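
/*
 * Illustrative sketch: X86_CONFIG() assembles a raw event value from
 * the named bit-fields, e.g. the PEBS alias for INST_RETIRED.ANY_P
 * with an inverted cmask of 16, as used by the Core2 PEBS aliasing
 * code:
 *
 *	u64 cfg = X86_CONFIG(.event = 0xc0, .umask = 0x01,
 *			     .inv = 1, .cmask = 16);
 */
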
enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

#define PERF_PEBS_DATA_SOURCE_MAX	0x10

struct x86_hybrid_pmu {
	struct pmu			pmu;
	const char			*name;
	u8				cpu_type;
	cpumask_t			supported_cpus;
	union perf_capabilities		intel_cap;
	u64				intel_ctrl;
	int				max_pebs_events;
	int				num_counters;
	int				num_counters_fixed;
	struct event_constraint		unconstrained;

	u64				hw_cache_event_ids
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX];
	u64				hw_cache_extra_regs
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX];
	struct event_constraint		*event_constraints;
	struct event_constraint		*pebs_constraints;
	struct extra_reg		*extra_regs;

	unsigned int			late_ack	:1,
					mid_ack		:1,
					enabled_ack	:1;

	u64				pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX];
};

static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
{
	return container_of(pmu, struct x86_hybrid_pmu, pmu);
}

extern struct static_key_false perf_is_hybrid;
#define is_hybrid()		static_branch_unlikely(&perf_is_hybrid)

#define hybrid(_pmu, _field)				\
(*({							\
	typeof(&x86_pmu._field) __Fp = &x86_pmu._field;	\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = &hybrid_pmu(_pmu)->_field;	\
							\
	__Fp;						\
}))

#define hybrid_var(_pmu, _var)				\
(*({							\
	typeof(&_var) __Fp = &_var;			\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = &hybrid_pmu(_pmu)->_var;		\
							\
	__Fp;						\
}))

#define hybrid_bit(_pmu, _field)			\
({							\
	bool __Fp = x86_pmu._field;			\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = hybrid_pmu(_pmu)->_field;	\
							\
	__Fp;						\
})
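
/*
 * Illustrative sketch: hybrid() evaluates to an lvalue, so one
 * expression transparently picks the per-PMU copy of a field on hybrid
 * parts and the global x86_pmu copy everywhere else, e.g.
 *
 *	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 */
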
enum hybrid_pmu_type {
	hybrid_big		= 0x40,
	hybrid_small		= 0x20,

	hybrid_big_small	= hybrid_big | hybrid_small,
};

#define X86_HYBRID_PMU_ATOM_IDX		0
#define X86_HYBRID_PMU_CORE_IDX		1

#define X86_HYBRID_NUM_PMUS		2

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*assign)(struct perf_event *event, int idx);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	void		(*read)(struct perf_event *event);
	int		(*set_period)(struct perf_event *event);
	u64		(*update)(struct perf_event *event);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	void		(*limit_period)(struct perf_event *event, s64 *l);

	/* PMI handler bits */
	unsigned int	late_ack		:1,
			mid_ack			:1,
			enabled_ack		:1;
	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	const struct attribute_group **attr_update;

	unsigned long	attr_freeze_on_smi;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts			:1,
			bts_active		:1,
			pebs			:1,
			pebs_active		:1,
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1,
			pebs_no_isolation	:1,
			pebs_block		:1,
			pebs_ept		:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	int		max_pebs_events;
	void		(*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	u64		(*pebs_latency_data)(struct perf_event *event, u64 status);
	unsigned long	large_pebs_flags;
	u64		rtm_abort_event;
	u64		pebs_capable;

	/*
	 * Intel LBR
	 */
	unsigned int	lbr_tos, lbr_from, lbr_to,
			lbr_info, lbr_nr;	   /* LBR base regs and size */
	union {
		u64	lbr_sel_mask;		   /* LBR_SELECT valid bits */
		u64	lbr_ctl_mask;		   /* LBR_CTL valid bits */
	};
	union {
		const int	*lbr_sel_map;	   /* lbr_select mappings */
		int		*lbr_ctl_map;	   /* LBR_CTL mappings */
	};
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	unsigned int	lbr_has_info:1;
	unsigned int	lbr_has_tsx:1;
	unsigned int	lbr_from_flags:1;
	unsigned int	lbr_to_cycles:1;

	/*
	 * Intel Architectural LBR CPUID Enumeration
	 */
	unsigned int	lbr_depth_mask:8;
	unsigned int	lbr_deep_c_reset:1;
	unsigned int	lbr_lip:1;
	unsigned int	lbr_cpl:1;
	unsigned int	lbr_filter:1;
	unsigned int	lbr_call_stack:1;
	unsigned int	lbr_mispred:1;
	unsigned int	lbr_timed_lbr:1;
	unsigned int	lbr_br_type:1;

	void		(*lbr_reset)(void);
	void		(*lbr_read)(struct cpu_hw_events *cpuc);
	void		(*lbr_save)(void *ctx);
	void		(*lbr_restore)(void *ctx);

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * Intel perf metrics
	 */
	int		num_topdown_events;

	/*
	 * perf task context (i.e. struct perf_event_context::task_ctx_data)
	 * switch helper to bridge calls from perf/core to perf/x86.
	 * See struct pmu::swap_task_ctx() usage for examples.
	 */
	void		(*swap_task_ctx)(struct perf_event_context *prev,
					 struct perf_event_context *next);

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;
	u64		perf_ctr_pair_en;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr, void *data);

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period) (struct perf_event *event, u64 period);

	int (*aux_output_match) (struct perf_event *event);

	int (*filter_match)(struct perf_event *event);

	/*
	 * Hybrid support
	 *
	 * Most PMU capabilities are the same among different hybrid PMUs.
	 * The global x86_pmu saves the architecture capabilities, which
	 * are available for all PMUs. The hybrid_pmu only includes the
	 * unique capabilities.
	 */
	int				num_hybrid_pmus;
	struct x86_hybrid_pmu		*hybrid_pmu;
	u8 (*get_hybrid_cpu_type)	(void);
};

struct x86_perf_task_context_opt {
	int lbr_callstack_users;
	int lbr_stack_state;
	int log_id;
};

struct x86_perf_task_context {
	u64 lbr_sel;
	int tos;
	int valid_lbrs;
	struct x86_perf_task_context_opt opt;
	struct lbr_entry lbr[MAX_LBR_ENTRIES];
};

struct x86_perf_task_context_arch_lbr {
	struct x86_perf_task_context_opt opt;
	struct lbr_entry entries[];
};

/*
 * Add padding to guarantee the 64-byte alignment of the state buffer.
 *
 * The structure is dynamically allocated. The size of the LBR state may vary
 * based on the number of LBR registers.
 *
 * Do not put anything after the LBR state.
 */
struct x86_perf_task_context_arch_lbr_xsave {
	struct x86_perf_task_context_opt		opt;

	union {
		struct xregs_state			xsave;
		struct {
			struct fxregs_state	i387;
			struct xstate_header	header;
			struct arch_lbr_state	lbr;
		} __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
	};
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
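
/*
 * Illustrative sketch: quirks are registered from __init code and run
 * once during PMU initialization, in the style of
 *
 *	static __init void some_model_quirk(void)
 *	{
 *		// fix up x86_pmu state for this model
 *	}
 *
 *	x86_add_quirk(some_model_quirk);
 *
 * where some_model_quirk is a hypothetical name.
 */
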
/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA		0x20 /* deal with TSX force abort */
#define PMU_FL_PAIR		0x40 /* merge counters for large incr. events */
#define PMU_FL_INSTR_LATENCY	0x80 /* Support Instruction Latency in PEBS Memory Info Record */
#define PMU_FL_MEM_LOADS_AUX	0x100 /* Require an auxiliary event for the complete memory info */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}

#define EVENT_ATTR_STR_HYBRID(_name, v, str, _pmu)			\
static struct perf_pmu_events_hybrid_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_hybrid_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str	= str,						\
	.pmu_type	= _pmu,						\
}

#define FORMAT_HYBRID_PTR(_id) (&format_attr_hybrid_##_id.attr.attr)

#define FORMAT_ATTR_HYBRID(_name, _pmu)					\
static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\
	.attr		= __ATTR_RO(_name),				\
	.pmu_type	= _pmu,						\
}
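
/*
 * Illustrative sketch: event strings exported this way show up under
 * /sys/devices/cpu/events/, e.g. the Nehalem memory-loads alias in the
 * Intel driver:
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 */
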
struct pmu *x86_get_pmu(unsigned int cpu);
extern struct x86_pmu x86_pmu __read_mostly;

DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
DECLARE_STATIC_CALL(x86_pmu_update,     *x86_pmu.update);

static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{
	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
		return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;

	return &((struct x86_perf_task_context *)ctx)->opt;
}

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map  &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
DECLARE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}
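
/*
 * Illustrative sketch: without an addr_offset() hook the counter MSRs
 * are assumed contiguous, so with the architectural perfmon bases
 *
 *	x86_pmu_config_addr(2) == MSR_ARCH_PERFMON_EVENTSEL0 + 2
 *	x86_pmu_event_addr(2)  == MSR_ARCH_PERFMON_PERFCTR0 + 2
 *
 * PMUs with an interleaved MSR layout (e.g. recent AMD families)
 * install addr_offset() to remap the index instead.
 */
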
x86_pmu.rdpmc_index(index) : index;
110227f6d22bSBorislav Petkov }
110327f6d22bSBorislav Petkov
1104fc4b8fcaSKan Liang bool check_hw_exists(struct pmu *pmu, int num_counters,
1105fc4b8fcaSKan Liang int num_counters_fixed);
1106fc4b8fcaSKan Liang
110727f6d22bSBorislav Petkov int x86_add_exclusive(unsigned int what);
110827f6d22bSBorislav Petkov
110927f6d22bSBorislav Petkov void x86_del_exclusive(unsigned int what);
111027f6d22bSBorislav Petkov
111127f6d22bSBorislav Petkov int x86_reserve_hardware(void);
111227f6d22bSBorislav Petkov
111327f6d22bSBorislav Petkov void x86_release_hardware(void);
111427f6d22bSBorislav Petkov
1115b00233b5SAndi Kleen int x86_pmu_max_precise(void);
1116b00233b5SAndi Kleen
111727f6d22bSBorislav Petkov void hw_perf_lbr_event_destroy(struct perf_event *event);
111827f6d22bSBorislav Petkov
111927f6d22bSBorislav Petkov int x86_setup_perfctr(struct perf_event *event);
112027f6d22bSBorislav Petkov
112127f6d22bSBorislav Petkov int x86_pmu_hw_config(struct perf_event *event);
112227f6d22bSBorislav Petkov
112327f6d22bSBorislav Petkov void x86_pmu_disable_all(void);
112427f6d22bSBorislav Petkov
1125ada54345SStephane Eranian static inline bool has_amd_brs(struct hw_perf_event *hwc)
1126ada54345SStephane Eranian {
1127ada54345SStephane Eranian return hwc->flags & PERF_X86_EVENT_AMD_BRS;
1128ada54345SStephane Eranian }
1129ada54345SStephane Eranian
113057388912SKim Phillips static inline bool is_counter_pair(struct hw_perf_event *hwc)
113157388912SKim Phillips {
113257388912SKim Phillips return hwc->flags & PERF_X86_EVENT_PAIR;
113357388912SKim Phillips }
113457388912SKim Phillips
113527f6d22bSBorislav Petkov static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
113627f6d22bSBorislav Petkov u64 enable_mask)
113727f6d22bSBorislav Petkov {
113827f6d22bSBorislav Petkov u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
113927f6d22bSBorislav Petkov
114027f6d22bSBorislav Petkov if (hwc->extra_reg.reg)
114127f6d22bSBorislav Petkov wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
114257388912SKim Phillips
114357388912SKim Phillips /*
114457388912SKim Phillips * Add the enabled Merge event on the next counter
114557388912SKim Phillips * if a large increment event is being enabled on this counter
114657388912SKim Phillips */
114757388912SKim Phillips if (is_counter_pair(hwc))
114857388912SKim Phillips wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
114957388912SKim Phillips
115027f6d22bSBorislav Petkov wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
115127f6d22bSBorislav Petkov }
115227f6d22bSBorislav Petkov
115327f6d22bSBorislav Petkov void x86_pmu_enable_all(int added);
115427f6d22bSBorislav Petkov
115527f6d22bSBorislav Petkov int perf_assign_events(struct event_constraint **constraints, int n,
115627f6d22bSBorislav Petkov int wmin, int wmax, int gpmax, int *assign);
115727f6d22bSBorislav Petkov int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
115827f6d22bSBorislav Petkov
115927f6d22bSBorislav Petkov void x86_pmu_stop(struct perf_event *event, int flags);
116027f6d22bSBorislav Petkov
116127f6d22bSBorislav Petkov static inline void x86_pmu_disable_event(struct perf_event *event)
116227f6d22bSBorislav Petkov {
1163df51fe7eSLike Xu u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
116427f6d22bSBorislav Petkov struct hw_perf_event *hwc = &event->hw;
116527f6d22bSBorislav Petkov
1166df51fe7eSLike Xu wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
116757388912SKim Phillips
116857388912SKim Phillips if (is_counter_pair(hwc)) 116957388912SKim Phillips wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0); 117027f6d22bSBorislav Petkov } 117127f6d22bSBorislav Petkov 117227f6d22bSBorislav Petkov void x86_pmu_enable_event(struct perf_event *event); 117327f6d22bSBorislav Petkov 117427f6d22bSBorislav Petkov int x86_pmu_handle_irq(struct pt_regs *regs); 117527f6d22bSBorislav Petkov 1176e11c1a7eSKan Liang void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed, 1177e11c1a7eSKan Liang u64 intel_ctrl); 1178e11c1a7eSKan Liang 1179d9977c43SKan Liang void x86_pmu_update_cpu_context(struct pmu *pmu, int cpu); 1180d9977c43SKan Liang 118127f6d22bSBorislav Petkov extern struct event_constraint emptyconstraint; 118227f6d22bSBorislav Petkov 118327f6d22bSBorislav Petkov extern struct event_constraint unconstrained; 118427f6d22bSBorislav Petkov 118527f6d22bSBorislav Petkov static inline bool kernel_ip(unsigned long ip) 118627f6d22bSBorislav Petkov { 118727f6d22bSBorislav Petkov #ifdef CONFIG_X86_32 118827f6d22bSBorislav Petkov return ip > PAGE_OFFSET; 118927f6d22bSBorislav Petkov #else 119027f6d22bSBorislav Petkov return (long)ip < 0; 119127f6d22bSBorislav Petkov #endif 119227f6d22bSBorislav Petkov } 119327f6d22bSBorislav Petkov 119427f6d22bSBorislav Petkov /* 119527f6d22bSBorislav Petkov * Not all PMUs provide the right context information to place the reported IP 119627f6d22bSBorislav Petkov * into full context. Specifically segment registers are typically not 119727f6d22bSBorislav Petkov * supplied. 119827f6d22bSBorislav Petkov * 119927f6d22bSBorislav Petkov * Assuming the address is a linear address (it is for IBS), we fake the CS and 120027f6d22bSBorislav Petkov * vm86 mode using the known zero-based code segment and 'fix up' the registers 120127f6d22bSBorislav Petkov * to reflect this. 120227f6d22bSBorislav Petkov * 120327f6d22bSBorislav Petkov * Intel PEBS/LBR appear to typically provide the effective address, nothing 120427f6d22bSBorislav Petkov * much we can do about that but pray and treat it like a linear address. 120527f6d22bSBorislav Petkov */ 120627f6d22bSBorislav Petkov static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) 120727f6d22bSBorislav Petkov { 120827f6d22bSBorislav Petkov regs->cs = kernel_ip(ip) ? 
__KERNEL_CS : __USER_CS;
120927f6d22bSBorislav Petkov if (regs->flags & X86_VM_MASK)
121027f6d22bSBorislav Petkov regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
121127f6d22bSBorislav Petkov regs->ip = ip;
121227f6d22bSBorislav Petkov }
121327f6d22bSBorislav Petkov
12144462fbfeSSandipan Das /*
12154462fbfeSSandipan Das * x86 control flow change classification
12164462fbfeSSandipan Das * x86 control flow changes include branches, interrupts, traps, faults
12174462fbfeSSandipan Das */
12184462fbfeSSandipan Das enum {
12194462fbfeSSandipan Das X86_BR_NONE = 0, /* unknown */
12204462fbfeSSandipan Das
12214462fbfeSSandipan Das X86_BR_USER = 1 << 0, /* branch target is user */
12224462fbfeSSandipan Das X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
12234462fbfeSSandipan Das
12244462fbfeSSandipan Das X86_BR_CALL = 1 << 2, /* call */
12254462fbfeSSandipan Das X86_BR_RET = 1 << 3, /* return */
12264462fbfeSSandipan Das X86_BR_SYSCALL = 1 << 4, /* syscall */
12274462fbfeSSandipan Das X86_BR_SYSRET = 1 << 5, /* syscall return */
12284462fbfeSSandipan Das X86_BR_INT = 1 << 6, /* sw interrupt */
12294462fbfeSSandipan Das X86_BR_IRET = 1 << 7, /* return from interrupt */
12304462fbfeSSandipan Das X86_BR_JCC = 1 << 8, /* conditional */
12314462fbfeSSandipan Das X86_BR_JMP = 1 << 9, /* jump */
12324462fbfeSSandipan Das X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
12334462fbfeSSandipan Das X86_BR_IND_CALL = 1 << 11,/* indirect calls */
12344462fbfeSSandipan Das X86_BR_ABORT = 1 << 12,/* transaction abort */
12354462fbfeSSandipan Das X86_BR_IN_TX = 1 << 13,/* in transaction */
12364462fbfeSSandipan Das X86_BR_NO_TX = 1 << 14,/* not in transaction */
12374462fbfeSSandipan Das X86_BR_ZERO_CALL = 1 << 15,/* zero length call */
12384462fbfeSSandipan Das X86_BR_CALL_STACK = 1 << 16,/* call stack */
12394462fbfeSSandipan Das X86_BR_IND_JMP = 1 << 17,/* indirect jump */
12404462fbfeSSandipan Das
12414462fbfeSSandipan Das X86_BR_TYPE_SAVE = 1 << 18,/* indicate to save branch type */
12424462fbfeSSandipan Das
12434462fbfeSSandipan Das };
12444462fbfeSSandipan Das
12454462fbfeSSandipan Das #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
12464462fbfeSSandipan Das #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
12474462fbfeSSandipan Das
12484462fbfeSSandipan Das #define X86_BR_ANY \
12494462fbfeSSandipan Das (X86_BR_CALL |\
12504462fbfeSSandipan Das X86_BR_RET |\
12514462fbfeSSandipan Das X86_BR_SYSCALL |\
12524462fbfeSSandipan Das X86_BR_SYSRET |\
12534462fbfeSSandipan Das X86_BR_INT |\
12544462fbfeSSandipan Das X86_BR_IRET |\
12554462fbfeSSandipan Das X86_BR_JCC |\
12564462fbfeSSandipan Das X86_BR_JMP |\
12574462fbfeSSandipan Das X86_BR_IRQ |\
12584462fbfeSSandipan Das X86_BR_ABORT |\
12594462fbfeSSandipan Das X86_BR_IND_CALL |\
12604462fbfeSSandipan Das X86_BR_IND_JMP |\
12614462fbfeSSandipan Das X86_BR_ZERO_CALL)
12624462fbfeSSandipan Das
12634462fbfeSSandipan Das #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
12644462fbfeSSandipan Das
12654462fbfeSSandipan Das #define X86_BR_ANY_CALL \
12664462fbfeSSandipan Das (X86_BR_CALL |\
12674462fbfeSSandipan Das X86_BR_IND_CALL |\
12684462fbfeSSandipan Das X86_BR_ZERO_CALL |\
12694462fbfeSSandipan Das X86_BR_SYSCALL |\
12704462fbfeSSandipan Das X86_BR_IRQ |\
12714462fbfeSSandipan Das X86_BR_INT)
12724462fbfeSSandipan Das
12734462fbfeSSandipan Das int common_branch_type(int type);
12744462fbfeSSandipan Das int branch_type(unsigned long from, unsigned long to, int abort);
1275df3e9612SSandipan Das int branch_type_fused(unsigned long from, unsigned long to, int abort,
1276df3e9612SSandipan Das int *offset);
12774462fbfeSSandipan Das
127827f6d22bSBorislav Petkov ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
127927f6d22bSBorislav Petkov ssize_t intel_event_sysfs_show(char *page, u64 config);
128027f6d22bSBorislav Petkov
1281a49ac9f8SHuang Rui ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
1282a49ac9f8SHuang Rui char *page);
1283fc07e9f9SAndi Kleen ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
1284fc07e9f9SAndi Kleen char *page);
1285a9c81ccdSKan Liang ssize_t events_hybrid_sysfs_show(struct device *dev,
1286a9c81ccdSKan Liang struct device_attribute *attr,
1287a9c81ccdSKan Liang char *page);
1288a49ac9f8SHuang Rui
1289fc4b8fcaSKan Liang static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
129032451614SKan Liang {
1291fc4b8fcaSKan Liang u64 intel_ctrl = hybrid(pmu, intel_ctrl);
1292fc4b8fcaSKan Liang
1293fc4b8fcaSKan Liang return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
129432451614SKan Liang }
129532451614SKan Liang
129627f6d22bSBorislav Petkov #ifdef CONFIG_CPU_SUP_AMD
129727f6d22bSBorislav Petkov
129827f6d22bSBorislav Petkov int amd_pmu_init(void);
1299cc37e520SStephane Eranian
1300703fb765SSandipan Das int amd_pmu_lbr_init(void);
1301ca5b7c0dSSandipan Das void amd_pmu_lbr_reset(void);
1302ca5b7c0dSSandipan Das void amd_pmu_lbr_read(void);
1303ca5b7c0dSSandipan Das void amd_pmu_lbr_add(struct perf_event *event);
1304ca5b7c0dSSandipan Das void amd_pmu_lbr_del(struct perf_event *event);
1305ca5b7c0dSSandipan Das void amd_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
1306ca5b7c0dSSandipan Das void amd_pmu_lbr_enable_all(void);
1307ca5b7c0dSSandipan Das void amd_pmu_lbr_disable_all(void);
1308ca5b7c0dSSandipan Das int amd_pmu_lbr_hw_config(struct perf_event *event);
1309703fb765SSandipan Das
1310cc37e520SStephane Eranian #ifdef CONFIG_PERF_EVENTS_AMD_BRS
1311b40d0156SSandipan Das
1312b40d0156SSandipan Das #define AMD_FAM19H_BRS_EVENT 0xc4 /* RETIRED_TAKEN_BRANCH_INSTRUCTIONS */
1313b40d0156SSandipan Das
1314ada54345SStephane Eranian int amd_brs_init(void);
1315ada54345SStephane Eranian void amd_brs_disable(void);
1316ada54345SStephane Eranian void amd_brs_enable(void);
1317ada54345SStephane Eranian void amd_brs_enable_all(void);
1318ada54345SStephane Eranian void amd_brs_disable_all(void);
1319ada54345SStephane Eranian void amd_brs_drain(void);
1320d5616bacSStephane Eranian void amd_brs_lopwr_init(void);
1322b40d0156SSandipan Das int amd_brs_hw_config(struct perf_event *event);
1323ada54345SStephane Eranian void amd_brs_reset(void);
1324ada54345SStephane Eranian
1325ada54345SStephane Eranian static inline void amd_pmu_brs_add(struct perf_event *event)
1326ada54345SStephane Eranian {
1327ada54345SStephane Eranian struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1328ada54345SStephane Eranian
1329ada54345SStephane Eranian perf_sched_cb_inc(event->ctx->pmu);
1330ada54345SStephane Eranian cpuc->lbr_users++;
1331ada54345SStephane Eranian /*
1332ada54345SStephane Eranian * No need to reset BRS because it is reset
1333ada54345SStephane Eranian * on brs_enable() and it is saturating
1334ada54345SStephane Eranian */
1335ada54345SStephane Eranian }
1336ada54345SStephane Eranian
1337ada54345SStephane Eranian static inline void amd_pmu_brs_del(struct perf_event *event)
1338ada54345SStephane Eranian {
1339ada54345SStephane Eranian struct cpu_hw_events *cpuc =
this_cpu_ptr(&cpu_hw_events); 1340ada54345SStephane Eranian 1341ada54345SStephane Eranian cpuc->lbr_users--; 1342ada54345SStephane Eranian WARN_ON_ONCE(cpuc->lbr_users < 0); 1343ada54345SStephane Eranian 1344ada54345SStephane Eranian perf_sched_cb_dec(event->ctx->pmu); 1345ada54345SStephane Eranian } 1346ada54345SStephane Eranian 1347ada54345SStephane Eranian void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in); 1348cc37e520SStephane Eranian #else 1349cc37e520SStephane Eranian static inline int amd_brs_init(void) 1350cc37e520SStephane Eranian { 1351cc37e520SStephane Eranian return 0; 1352cc37e520SStephane Eranian } 1353cc37e520SStephane Eranian static inline void amd_brs_disable(void) {} 1354cc37e520SStephane Eranian static inline void amd_brs_enable(void) {} 1355cc37e520SStephane Eranian static inline void amd_brs_drain(void) {} 1356cc37e520SStephane Eranian static inline void amd_brs_lopwr_init(void) {} 1357cc37e520SStephane Eranian static inline void amd_brs_disable_all(void) {} 1358b40d0156SSandipan Das static inline int amd_brs_hw_config(struct perf_event *event) 1359cc37e520SStephane Eranian { 1360cc37e520SStephane Eranian return 0; 1361cc37e520SStephane Eranian } 1362cc37e520SStephane Eranian static inline void amd_brs_reset(void) {} 1363cc37e520SStephane Eranian 1364cc37e520SStephane Eranian static inline void amd_pmu_brs_add(struct perf_event *event) 1365cc37e520SStephane Eranian { 1366cc37e520SStephane Eranian } 1367cc37e520SStephane Eranian 1368cc37e520SStephane Eranian static inline void amd_pmu_brs_del(struct perf_event *event) 1369cc37e520SStephane Eranian { 1370cc37e520SStephane Eranian } 1371cc37e520SStephane Eranian 1372cc37e520SStephane Eranian static inline void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in) 1373cc37e520SStephane Eranian { 1374cc37e520SStephane Eranian } 1375cc37e520SStephane Eranian 1376cc37e520SStephane Eranian static inline void amd_brs_enable_all(void) 1377cc37e520SStephane Eranian { 1378cc37e520SStephane Eranian } 1379cc37e520SStephane Eranian 1380cc37e520SStephane Eranian #endif 1381ba2fe750SStephane Eranian 138227f6d22bSBorislav Petkov #else /* CONFIG_CPU_SUP_AMD */ 138327f6d22bSBorislav Petkov 138427f6d22bSBorislav Petkov static inline int amd_pmu_init(void) 138527f6d22bSBorislav Petkov { 138627f6d22bSBorislav Petkov return 0; 138727f6d22bSBorislav Petkov } 138827f6d22bSBorislav Petkov 1389ada54345SStephane Eranian static inline int amd_brs_init(void) 1390ada54345SStephane Eranian { 1391ada54345SStephane Eranian return -EOPNOTSUPP; 1392ada54345SStephane Eranian } 1393ada54345SStephane Eranian 1394ada54345SStephane Eranian static inline void amd_brs_drain(void) 1395ada54345SStephane Eranian { 1396ada54345SStephane Eranian } 1397ada54345SStephane Eranian 1398ada54345SStephane Eranian static inline void amd_brs_enable_all(void) 1399ada54345SStephane Eranian { 1400ada54345SStephane Eranian } 1401ada54345SStephane Eranian 1402ada54345SStephane Eranian static inline void amd_brs_disable_all(void) 1403ada54345SStephane Eranian { 1404ada54345SStephane Eranian } 140527f6d22bSBorislav Petkov #endif /* CONFIG_CPU_SUP_AMD */ 140627f6d22bSBorislav Petkov 140742880f72SAlexander Shishkin static inline int is_pebs_pt(struct perf_event *event) 140842880f72SAlexander Shishkin { 140942880f72SAlexander Shishkin return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT); 141042880f72SAlexander Shishkin } 141142880f72SAlexander Shishkin 141227f6d22bSBorislav Petkov #ifdef CONFIG_CPU_SUP_INTEL 141327f6d22bSBorislav Petkov 
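/*
 * BTS can stand in for the branch-instructions event only when every
 * branch is sampled: a fixed sample_period of 1 with frequency-based
 * sampling (attr.freq) disabled. The helpers below encode exactly
 * that test.
 */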
141481ec3f3cSJiri Olsa static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) 141527f6d22bSBorislav Petkov { 141667266c10SJiri Olsa struct hw_perf_event *hwc = &event->hw; 141767266c10SJiri Olsa unsigned int hw_event, bts_event; 141827f6d22bSBorislav Petkov 141967266c10SJiri Olsa if (event->attr.freq) 142027f6d22bSBorislav Petkov return false; 142167266c10SJiri Olsa 142267266c10SJiri Olsa hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; 142367266c10SJiri Olsa bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 142467266c10SJiri Olsa 142581ec3f3cSJiri Olsa return hw_event == bts_event && period == 1; 142681ec3f3cSJiri Olsa } 142781ec3f3cSJiri Olsa 142881ec3f3cSJiri Olsa static inline bool intel_pmu_has_bts(struct perf_event *event) 142981ec3f3cSJiri Olsa { 143081ec3f3cSJiri Olsa struct hw_perf_event *hwc = &event->hw; 143181ec3f3cSJiri Olsa 143281ec3f3cSJiri Olsa return intel_pmu_has_bts_period(event, hwc->sample_period); 143327f6d22bSBorislav Petkov } 143427f6d22bSBorislav Petkov 1435c22ac2a3SSong Liu static __always_inline void __intel_pmu_pebs_disable_all(void) 1436c22ac2a3SSong Liu { 1437c22ac2a3SSong Liu wrmsrl(MSR_IA32_PEBS_ENABLE, 0); 1438c22ac2a3SSong Liu } 1439c22ac2a3SSong Liu 1440c22ac2a3SSong Liu static __always_inline void __intel_pmu_arch_lbr_disable(void) 1441c22ac2a3SSong Liu { 1442c22ac2a3SSong Liu wrmsrl(MSR_ARCH_LBR_CTL, 0); 1443c22ac2a3SSong Liu } 1444c22ac2a3SSong Liu 1445c22ac2a3SSong Liu static __always_inline void __intel_pmu_lbr_disable(void) 1446c22ac2a3SSong Liu { 1447c22ac2a3SSong Liu u64 debugctl; 1448c22ac2a3SSong Liu 1449c22ac2a3SSong Liu rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1450c22ac2a3SSong Liu debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 1451c22ac2a3SSong Liu wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1452c22ac2a3SSong Liu } 1453c22ac2a3SSong Liu 145427f6d22bSBorislav Petkov int intel_pmu_save_and_restart(struct perf_event *event); 145527f6d22bSBorislav Petkov 145627f6d22bSBorislav Petkov struct event_constraint * 145727f6d22bSBorislav Petkov x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 145827f6d22bSBorislav Petkov struct perf_event *event); 145927f6d22bSBorislav Petkov 1460d01b1f96SPeter Zijlstra (Intel) extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu); 1461d01b1f96SPeter Zijlstra (Intel) extern void intel_cpuc_finish(struct cpu_hw_events *cpuc); 146227f6d22bSBorislav Petkov 146327f6d22bSBorislav Petkov int intel_pmu_init(void); 146427f6d22bSBorislav Petkov 146527f6d22bSBorislav Petkov void init_debug_store_on_cpu(int cpu); 146627f6d22bSBorislav Petkov 146727f6d22bSBorislav Petkov void fini_debug_store_on_cpu(int cpu); 146827f6d22bSBorislav Petkov 146927f6d22bSBorislav Petkov void release_ds_buffers(void); 147027f6d22bSBorislav Petkov 147127f6d22bSBorislav Petkov void reserve_ds_buffers(void); 147227f6d22bSBorislav Petkov 1473c085fb87SKan Liang void release_lbr_buffers(void); 1474c085fb87SKan Liang 1475488e13a4SLike Xu void reserve_lbr_buffers(void); 1476488e13a4SLike Xu 147727f6d22bSBorislav Petkov extern struct event_constraint bts_constraint; 1478097e4311SLike Xu extern struct event_constraint vlbr_constraint; 147927f6d22bSBorislav Petkov 148027f6d22bSBorislav Petkov void intel_pmu_enable_bts(u64 config); 148127f6d22bSBorislav Petkov 148227f6d22bSBorislav Petkov void intel_pmu_disable_bts(void); 148327f6d22bSBorislav Petkov 148427f6d22bSBorislav Petkov int intel_pmu_drain_bts_buffer(void); 148527f6d22bSBorislav Petkov 148639a41278SKan Liang u64 
adl_latency_data_small(struct perf_event *event, u64 status); 148739a41278SKan Liang 148827f6d22bSBorislav Petkov extern struct event_constraint intel_core2_pebs_event_constraints[]; 148927f6d22bSBorislav Petkov 149027f6d22bSBorislav Petkov extern struct event_constraint intel_atom_pebs_event_constraints[]; 149127f6d22bSBorislav Petkov 149227f6d22bSBorislav Petkov extern struct event_constraint intel_slm_pebs_event_constraints[]; 149327f6d22bSBorislav Petkov 14948b92c3a7SKan Liang extern struct event_constraint intel_glm_pebs_event_constraints[]; 14958b92c3a7SKan Liang 1496dd0b06b5SKan Liang extern struct event_constraint intel_glp_pebs_event_constraints[]; 1497dd0b06b5SKan Liang 1498f83d2f91SKan Liang extern struct event_constraint intel_grt_pebs_event_constraints[]; 1499f83d2f91SKan Liang 150027f6d22bSBorislav Petkov extern struct event_constraint intel_nehalem_pebs_event_constraints[]; 150127f6d22bSBorislav Petkov 150227f6d22bSBorislav Petkov extern struct event_constraint intel_westmere_pebs_event_constraints[]; 150327f6d22bSBorislav Petkov 150427f6d22bSBorislav Petkov extern struct event_constraint intel_snb_pebs_event_constraints[]; 150527f6d22bSBorislav Petkov 150627f6d22bSBorislav Petkov extern struct event_constraint intel_ivb_pebs_event_constraints[]; 150727f6d22bSBorislav Petkov 150827f6d22bSBorislav Petkov extern struct event_constraint intel_hsw_pebs_event_constraints[]; 150927f6d22bSBorislav Petkov 1510b3e62463SStephane Eranian extern struct event_constraint intel_bdw_pebs_event_constraints[]; 1511b3e62463SStephane Eranian 151227f6d22bSBorislav Petkov extern struct event_constraint intel_skl_pebs_event_constraints[]; 151327f6d22bSBorislav Petkov 151460176089SKan Liang extern struct event_constraint intel_icl_pebs_event_constraints[]; 151560176089SKan Liang 151661b985e3SKan Liang extern struct event_constraint intel_spr_pebs_event_constraints[]; 151761b985e3SKan Liang 151827f6d22bSBorislav Petkov struct event_constraint *intel_pebs_constraints(struct perf_event *event); 151927f6d22bSBorislav Petkov 152068f7082fSPeter Zijlstra void intel_pmu_pebs_add(struct perf_event *event); 152168f7082fSPeter Zijlstra 152268f7082fSPeter Zijlstra void intel_pmu_pebs_del(struct perf_event *event); 152368f7082fSPeter Zijlstra 152427f6d22bSBorislav Petkov void intel_pmu_pebs_enable(struct perf_event *event); 152527f6d22bSBorislav Petkov 152627f6d22bSBorislav Petkov void intel_pmu_pebs_disable(struct perf_event *event); 152727f6d22bSBorislav Petkov 152827f6d22bSBorislav Petkov void intel_pmu_pebs_enable_all(void); 152927f6d22bSBorislav Petkov 153027f6d22bSBorislav Petkov void intel_pmu_pebs_disable_all(void); 153127f6d22bSBorislav Petkov 153227f6d22bSBorislav Petkov void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in); 153327f6d22bSBorislav Petkov 15345bee2cc6SKan Liang void intel_pmu_auto_reload_read(struct perf_event *event); 15355bee2cc6SKan Liang 15365624986dSKan Liang void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); 1537c22497f5SKan Liang 153827f6d22bSBorislav Petkov void intel_ds_init(void); 153927f6d22bSBorislav Petkov 1540421ca868SAlexey Budankov void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev, 1541421ca868SAlexey Budankov struct perf_event_context *next); 1542421ca868SAlexey Budankov 154327f6d22bSBorislav Petkov void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in); 154427f6d22bSBorislav Petkov 154519fc9dddSDavid Carrillo-Cisneros u64 lbr_from_signext_quirk_wr(u64 val); 154619fc9dddSDavid Carrillo-Cisneros 
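/*
 * Illustrative sketch, not part of the driver: a per-model PEBS table
 * like the ones declared above is an array of struct event_constraint
 * terminated by EVENT_CONSTRAINT_END (intel_foo is a hypothetical
 * model; the constraint macros are assumed from their definitions
 * elsewhere in this file):
 *
 *	struct event_constraint intel_foo_pebs_event_constraints[] = {
 *		INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c2, 0xf),
 *		EVENT_CONSTRAINT_END
 *	};
 *
 * intel_pebs_constraints() scans the active model's table and returns
 * the first entry whose code/cmask match the event's config.
 */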
154727f6d22bSBorislav Petkov void intel_pmu_lbr_reset(void); 154827f6d22bSBorislav Petkov 15499f354a72SKan Liang void intel_pmu_lbr_reset_32(void); 15509f354a72SKan Liang 15519f354a72SKan Liang void intel_pmu_lbr_reset_64(void); 15529f354a72SKan Liang 155368f7082fSPeter Zijlstra void intel_pmu_lbr_add(struct perf_event *event); 155427f6d22bSBorislav Petkov 155568f7082fSPeter Zijlstra void intel_pmu_lbr_del(struct perf_event *event); 155627f6d22bSBorislav Petkov 155727f6d22bSBorislav Petkov void intel_pmu_lbr_enable_all(bool pmi); 155827f6d22bSBorislav Petkov 155927f6d22bSBorislav Petkov void intel_pmu_lbr_disable_all(void); 156027f6d22bSBorislav Petkov 156127f6d22bSBorislav Petkov void intel_pmu_lbr_read(void); 156227f6d22bSBorislav Petkov 1563c301b1d8SKan Liang void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc); 1564c301b1d8SKan Liang 1565c301b1d8SKan Liang void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc); 1566c301b1d8SKan Liang 1567799571bfSKan Liang void intel_pmu_lbr_save(void *ctx); 1568799571bfSKan Liang 1569799571bfSKan Liang void intel_pmu_lbr_restore(void *ctx); 1570799571bfSKan Liang 157127f6d22bSBorislav Petkov void intel_pmu_lbr_init_core(void); 157227f6d22bSBorislav Petkov 157327f6d22bSBorislav Petkov void intel_pmu_lbr_init_nhm(void); 157427f6d22bSBorislav Petkov 157527f6d22bSBorislav Petkov void intel_pmu_lbr_init_atom(void); 157627f6d22bSBorislav Petkov 1577f21d5adcSKan Liang void intel_pmu_lbr_init_slm(void); 1578f21d5adcSKan Liang 157927f6d22bSBorislav Petkov void intel_pmu_lbr_init_snb(void); 158027f6d22bSBorislav Petkov 158127f6d22bSBorislav Petkov void intel_pmu_lbr_init_hsw(void); 158227f6d22bSBorislav Petkov 158327f6d22bSBorislav Petkov void intel_pmu_lbr_init_skl(void); 158427f6d22bSBorislav Petkov 158527f6d22bSBorislav Petkov void intel_pmu_lbr_init_knl(void); 158627f6d22bSBorislav Petkov 15871ac7fd81SPeter Zijlstra (Intel) void intel_pmu_lbr_init(void); 15881ac7fd81SPeter Zijlstra (Intel) 158947125db2SKan Liang void intel_pmu_arch_lbr_init(void); 159047125db2SKan Liang 1591e17dc653SAndi Kleen void intel_pmu_pebs_data_source_nhm(void); 1592e17dc653SAndi Kleen 15936ae5fa61SAndi Kleen void intel_pmu_pebs_data_source_skl(bool pmem); 15946ae5fa61SAndi Kleen 1595ccf170e9SKan Liang void intel_pmu_pebs_data_source_adl(void); 1596ccf170e9SKan Liang 159727f6d22bSBorislav Petkov int intel_pmu_setup_lbr_filter(struct perf_event *event); 159827f6d22bSBorislav Petkov 159927f6d22bSBorislav Petkov void intel_pt_interrupt(void); 160027f6d22bSBorislav Petkov 160127f6d22bSBorislav Petkov int intel_bts_interrupt(void); 160227f6d22bSBorislav Petkov 160327f6d22bSBorislav Petkov void intel_bts_enable_local(void); 160427f6d22bSBorislav Petkov 160527f6d22bSBorislav Petkov void intel_bts_disable_local(void); 160627f6d22bSBorislav Petkov 160727f6d22bSBorislav Petkov int p4_pmu_init(void); 160827f6d22bSBorislav Petkov 160927f6d22bSBorislav Petkov int p6_pmu_init(void); 161027f6d22bSBorislav Petkov 161127f6d22bSBorislav Petkov int knc_pmu_init(void); 161227f6d22bSBorislav Petkov 161327f6d22bSBorislav Petkov static inline int is_ht_workaround_enabled(void) 161427f6d22bSBorislav Petkov { 161527f6d22bSBorislav Petkov return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED); 161627f6d22bSBorislav Petkov } 161727f6d22bSBorislav Petkov 161827f6d22bSBorislav Petkov #else /* CONFIG_CPU_SUP_INTEL */ 161927f6d22bSBorislav Petkov 162027f6d22bSBorislav Petkov static inline void reserve_ds_buffers(void) 162127f6d22bSBorislav Petkov { 162227f6d22bSBorislav Petkov } 162327f6d22bSBorislav Petkov 
162427f6d22bSBorislav Petkov static inline void release_ds_buffers(void)
162527f6d22bSBorislav Petkov {
162627f6d22bSBorislav Petkov }
162727f6d22bSBorislav Petkov
1628c085fb87SKan Liang static inline void release_lbr_buffers(void)
1629c085fb87SKan Liang {
1630c085fb87SKan Liang }
1631c085fb87SKan Liang
1632488e13a4SLike Xu static inline void reserve_lbr_buffers(void)
1633488e13a4SLike Xu {
1634488e13a4SLike Xu }
1635488e13a4SLike Xu
163627f6d22bSBorislav Petkov static inline int intel_pmu_init(void)
163727f6d22bSBorislav Petkov {
163827f6d22bSBorislav Petkov return 0;
163927f6d22bSBorislav Petkov }
164027f6d22bSBorislav Petkov
1641f764c58bSPeter Zijlstra static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
164227f6d22bSBorislav Petkov {
1643d01b1f96SPeter Zijlstra (Intel) return 0;
1644d01b1f96SPeter Zijlstra (Intel) }
1645d01b1f96SPeter Zijlstra (Intel)
1646f764c58bSPeter Zijlstra static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
1647d01b1f96SPeter Zijlstra (Intel) {
164827f6d22bSBorislav Petkov }
164927f6d22bSBorislav Petkov
165027f6d22bSBorislav Petkov static inline int is_ht_workaround_enabled(void)
165127f6d22bSBorislav Petkov {
165227f6d22bSBorislav Petkov return 0;
165327f6d22bSBorislav Petkov }
165427f6d22bSBorislav Petkov #endif /* CONFIG_CPU_SUP_INTEL */
16553a4ac121SCodyYao-oc
16563a4ac121SCodyYao-oc #if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
16573a4ac121SCodyYao-oc int zhaoxin_pmu_init(void);
16583a4ac121SCodyYao-oc #else
16593a4ac121SCodyYao-oc static inline int zhaoxin_pmu_init(void)
16603a4ac121SCodyYao-oc {
16613a4ac121SCodyYao-oc return 0;
16623a4ac121SCodyYao-oc }
16633a4ac121SCodyYao-oc #endif /* CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN */
1664
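/*
 * Usage sketch, illustrative only (br_is_any_call() is hypothetical,
 * not part of this header): the X86_BR_* classification bits defined
 * earlier compose with the mask macros, so filtering a decoded branch
 * reduces to a single mask test:
 *
 *	static inline bool br_is_any_call(int br_type)
 *	{
 *		return br_type & X86_BR_ANY_CALL;
 *	}
 *
 * branch_type() returns these bits (the branch class OR'ed with the
 * X86_BR_USER/X86_BR_KERNEL privilege bits), so its return value can
 * be filtered the same way.
 */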