/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/intel_ds.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE		= -1, /* not used */

	EXTRA_REG_RSP_0		= 0,  /* offcore_response_0 */
	EXTRA_REG_RSP_1		= 1,  /* offcore_response_1 */
	EXTRA_REG_LBR		= 2,  /* lbr_select */
	EXTRA_REG_LDLAT		= 3,  /* ld_lat_threshold */
	EXTRA_REG_FE		= 4,  /* fe_* */

	EXTRA_REG_MAX		      /* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64		code;
	u64		cmask;
	int		weight;
	int		overlap;
	int		flags;
	unsigned int	size;
};

static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}
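/*
 * Illustrative sketch (not part of the original header): the subtraction
 * above is unsigned, so a single compare implements the range check
 * code <= (ecode & cmask) <= code + size. For a hypothetical constraint
 * covering event codes 0xc0-0xc2:
 *
 *	struct event_constraint c = {
 *		.code = 0xc0, .cmask = 0xff, .size = 0x2,
 *	};
 *
 *	constraint_match(&c, 0xc1);	// true:  0xc1 - 0xc0 = 1 <= 2
 *	constraint_match(&c, 0xbf);	// false: 0xbf - 0xc0 wraps to ~0ULL
 */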
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0008 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0040 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0080 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */
#define PERF_X86_EVENT_PEBS_VIA_PT	0x0800 /* use PT buffer for PEBS */
#define PERF_X86_EVENT_PAIR		0x1000 /* Large Increment per Cycle */
#define PERF_X86_EVENT_LBR_SELECT	0x2000 /* Save/Restore MSR_LBR_SELECT */
#define PERF_X86_EVENT_TOPDOWN		0x4000 /* Count Topdown slots/metrics events */
#define PERF_X86_EVENT_PEBS_STLAT	0x8000 /* st+stlat data address sampling */

static inline bool is_topdown_count(struct perf_event *event)
{
	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
}

static inline bool is_metric_event(struct perf_event *event)
{
	u64 config = event->attr.config;

	return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
		((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING)  &&
		((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
}

static inline bool is_slots_event(struct perf_event *event)
{
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
}

static inline bool is_topdown_event(struct perf_event *event)
{
	return is_metric_event(event) || is_slots_event(event);
}
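/*
 * Worked example (illustrative, using the encodings from
 * <asm/perf_event.h> at the time of writing): a SLOTS event
 * (config == INTEL_TD_SLOTS, 0x0400) is a topdown event via
 * is_slots_event(), while a pseudo event such as INTEL_TD_METRIC_RETIRING
 * (0x8000) has a zero event-select field and a code inside the metric
 * range, so it is a topdown event via is_metric_event(). An ordinary
 * event like 0x00c0 (INST_RETIRED.ANY_P) matches neither helper.
 */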
struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
#define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
#define PEBS_OUTPUT_OFFSET	61
#define PEBS_OUTPUT_MASK	(3ull << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT		(1ull << PEBS_OUTPUT_OFFSET)
#define PEBS_VIA_PT_MASK	(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 *
 */
#define LARGE_PEBS_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
	PERF_SAMPLE_PERIOD | PERF_SAMPLE_CODE_PAGE_SIZE)
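/*
 * Example (illustrative): an event sampling only PERF_SAMPLE_IP and
 * PERF_SAMPLE_ADDR stays within LARGE_PEBS_FLAGS, so the PEBS buffer may
 * accumulate many records before a single PMI drains them. Requesting a
 * flag outside this set, e.g. PERF_SAMPLE_CALLCHAIN, cannot be
 * reconstructed from a stale PEBS record and therefore forces a PMI
 * after every record.
 */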
#define PEBS_GP_REGS			\
	((1ULL << PERF_REG_X86_AX)    | \
	 (1ULL << PERF_REG_X86_BX)    | \
	 (1ULL << PERF_REG_X86_CX)    | \
	 (1ULL << PERF_REG_X86_DX)    | \
	 (1ULL << PERF_REG_X86_DI)    | \
	 (1ULL << PERF_REG_X86_SI)    | \
	 (1ULL << PERF_REG_X86_SP)    | \
	 (1ULL << PERF_REG_X86_BP)    | \
	 (1ULL << PERF_REG_X86_IP)    | \
	 (1ULL << PERF_REG_X86_FLAGS) | \
	 (1ULL << PERF_REG_X86_R8)    | \
	 (1ULL << PERF_REG_X86_R9)    | \
	 (1ULL << PERF_REG_X86_R10)   | \
	 (1ULL << PERF_REG_X86_R11)   | \
	 (1ULL << PERF_REG_X86_R12)   | \
	 (1ULL << PERF_REG_X86_R13)   | \
	 (1ULL << PERF_REG_X86_R14)   | \
	 (1ULL << PERF_REG_X86_R15))

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t		lock;	/* per-core: protect structure */
	u64			config;	/* extra MSR config */
	u64			reg;	/* extra MSR number */
	atomic_t		ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account       regs[EXTRA_REG_MAX];
	int                     refcnt;		/* per-core: #HT threads */
	unsigned                core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES		32

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_TIME,
};

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			n_txn_pair;
	int			n_txn_metric;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	void			*ds_pebs_vaddr;
	void			*ds_bts_vaddr;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;
	int			n_pebs_via_pt;
	int			pebs_output;

	/* Current super set of events hardware configuration */
	u64			pebs_data_cfg;
	u64			active_pebs_data_cfg;
	int			pebs_record_size;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	int				lbr_pebs_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	union {
		struct er_account		*lbr_sel;
		struct er_account		*lbr_ctl;
	};
	u64				br_sel;
	void				*last_task_ctx;
	int				last_log_id;
	int				lbr_select;
	void				*lbr_xsave;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthread
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs	*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * SKL TSX_FORCE_ABORT shadow
	 */
	u64				tfa_shadow;

	/*
	 * Perf Metrics
	 */
	/* number of accepted metrics events */
	int				n_metric;
	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;
	int				n_pair; /* Large increment events */

	void				*kfree_on_online[X86_PERF_KFREE_MAX];

	struct pmu			*pmu;
};

#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.size = (e) - (c),		\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m) \
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
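/*
 * Expansion sketch (illustrative): EVENT_CONSTRAINT(0xc0, 0x3, 0xff)
 * describes an event code 0xc0 (matched under mask 0xff) that may only
 * run on counters 0 and 1:
 *
 *	{ .idxmsk64 = 0x3, .code = 0xc0, .size = 0, .cmask = 0xff,
 *	  .weight = 2, .overlap = 0, .flags = 0 }
 *
 * .weight is HWEIGHT(0x3) = 2, the number of usable counters.
 */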
#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
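/*
 * Example (illustrative): FIXED_EVENT_CONSTRAINT(0x00c0, 0) ties the
 * architectural INST_RETIRED.ANY event to fixed counter 0. The counter
 * bit is (1ULL << 32), i.e. the first index past the general purpose
 * counters, and because the match mask is FIXED_EVENT_FLAGS, a modifier-
 * qualified 0x00c0 (e.g. with inv or cnt-mask set) does not match and
 * falls back to a general purpose counter.
 */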
/*
 * The special metric counters do not actually exist. They are calculated from
 * the combination of the FxCtr3 + MSR_PERF_METRICS.
 *
 * The special metric counters are mapped to a dummy offset for the scheduler.
 * The sharing between multiple users of the same metric without multiplexing
 * is not allowed, even though the hardware supports that in principle.
 */

#define METRIC_EVENT_CONSTRAINT(c, n)					\
	EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)),	\
			 INTEL_ARCH_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PSD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_STLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
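/*
 * Example (illustrative; the counter mask varies by generation):
 * INTEL_PLD_CONSTRAINT(0x01cd, 0xf) constrains
 * MEM_TRANS_RETIRED.LOAD_LATENCY to counters 0-3 and tags it with
 * PERF_X86_EVENT_PEBS_LDLAT, which routes the event's latency threshold
 * into MSR_PEBS_LD_LAT_THRESHOLD via the LDLAT extra register.
 */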
/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
	__EVENT_CONSTRAINT_RANGE(code, end, n,		\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
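/*
 * Usage sketch (illustrative; the exact valid_mask differs per model):
 * the offcore response facility can be described as
 *
 *	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0)
 *
 * i.e. events whose event-select/umask match 0x01b7 need the extra MSR
 * MSR_OFFCORE_RSP_0, whose contents come from attr.config1 limited to
 * valid_mask, with conflicting users arbitrated through the
 * EXTRA_REG_RSP_0 er_account slot.
 */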
union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
		u64     pebs_baseline:1;
		u64	perf_metrics:1;
		u64	pebs_output_pt_available:1;
		u64	anythread_deprecated:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

struct x86_hybrid_pmu {
	struct pmu			pmu;
	union perf_capabilities		intel_cap;
	u64				intel_ctrl;
	int				max_pebs_events;
	int				num_counters;
	int				num_counters_fixed;
	struct event_constraint		unconstrained;

	u64				hw_cache_event_ids
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX];
	u64				hw_cache_extra_regs
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX];
};

static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
{
	return container_of(pmu, struct x86_hybrid_pmu, pmu);
}

extern struct static_key_false perf_is_hybrid;
#define is_hybrid()		static_branch_unlikely(&perf_is_hybrid)

#define hybrid(_pmu, _field)				\
(*({							\
	typeof(&x86_pmu._field) __Fp = &x86_pmu._field;	\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = &hybrid_pmu(_pmu)->_field;	\
							\
	__Fp;						\
}))
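/*
 * Usage sketch (illustrative): on a hybrid system each PMU may expose a
 * different counter layout, so callers read per-PMU state through the
 * accessor instead of the global copy, e.g.:
 *
 *	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 *	int ncnt = hybrid(event->pmu, num_counters);
 *
 * On non-hybrid systems the static branch keeps this a plain access to
 * the x86_pmu field. The hybrid_var() variant below does the same for
 * variables that live outside struct x86_pmu.
 */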
#define hybrid_var(_pmu, _var)				\
(*({							\
	typeof(&_var) __Fp = &_var;			\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = &hybrid_pmu(_pmu)->_var;		\
							\
	__Fp;						\
}))

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	void		(*read)(struct perf_event *event);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	u64		(*limit_period)(struct perf_event *event, u64 l);

	/* PMI handler bits */
	unsigned int	late_ack		:1,
			enabled_ack		:1;
	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	const struct attribute_group **attr_update;

	unsigned long	attr_freeze_on_smi;
	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts			:1,
			bts_active		:1,
			pebs			:1,
			pebs_active		:1,
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1,
			pebs_no_isolation	:1,
			pebs_block		:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	int		max_pebs_events;
	void		(*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	unsigned long	large_pebs_flags;
	u64		rtm_abort_event;

	/*
	 * Intel LBR
	 */
	unsigned int	lbr_tos, lbr_from, lbr_to,
			lbr_info, lbr_nr;	   /* LBR base regs and size */
	union {
		u64	lbr_sel_mask;		   /* LBR_SELECT valid bits */
		u64	lbr_ctl_mask;		   /* LBR_CTL valid bits */
	};
	union {
		const int	*lbr_sel_map;	   /* lbr_select mappings */
		int		*lbr_ctl_map;	   /* LBR_CTL mappings */
	};
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel Architectural LBR CPUID Enumeration
	 */
	unsigned int	lbr_depth_mask:8;
	unsigned int	lbr_deep_c_reset:1;
	unsigned int	lbr_lip:1;
	unsigned int	lbr_cpl:1;
	unsigned int	lbr_filter:1;
	unsigned int	lbr_call_stack:1;
	unsigned int	lbr_mispred:1;
	unsigned int	lbr_timed_lbr:1;
	unsigned int	lbr_br_type:1;

	void		(*lbr_reset)(void);
	void		(*lbr_read)(struct cpu_hw_events *cpuc);
	void		(*lbr_save)(void *ctx);
	void		(*lbr_restore)(void *ctx);

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * Intel perf metrics
	 */
	int		num_topdown_events;
	u64		(*update_topdown_event)(struct perf_event *event);
	int		(*set_topdown_event_period)(struct perf_event *event);

	/*
	 * perf task context (i.e. struct perf_event_context::task_ctx_data)
	 * switch helper to bridge calls from perf/core to perf/x86.
	 * See struct pmu::swap_task_ctx() usage for examples;
	 */
	void		(*swap_task_ctx)(struct perf_event_context *prev,
					 struct perf_event_context *next);

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;
	u64		perf_ctr_pair_en;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period) (struct perf_event *event, u64 period);

	int (*aux_output_match) (struct perf_event *event);

	/*
	 * Hybrid support
	 *
	 * Most PMU capabilities are the same among different hybrid PMUs.
	 * The global x86_pmu saves the architecture capabilities, which
	 * are available for all PMUs. The hybrid_pmu only includes the
	 * unique capabilities.
	 */
	int				num_hybrid_pmus;
	struct x86_hybrid_pmu		*hybrid_pmu;
};

struct x86_perf_task_context_opt {
	int lbr_callstack_users;
	int lbr_stack_state;
	int log_id;
};

struct x86_perf_task_context {
	u64 lbr_sel;
	int tos;
	int valid_lbrs;
	struct x86_perf_task_context_opt opt;
	struct lbr_entry lbr[MAX_LBR_ENTRIES];
};

struct x86_perf_task_context_arch_lbr {
	struct x86_perf_task_context_opt opt;
	struct lbr_entry entries[];
};

/*
 * Add padding to guarantee the 64-byte alignment of the state buffer.
 *
 * The structure is dynamically allocated. The size of the LBR state may vary
 * based on the number of LBR registers.
 *
 * Do not put anything after the LBR state.
 */
struct x86_perf_task_context_arch_lbr_xsave {
	struct x86_perf_task_context_opt		opt;

	union {
		struct xregs_state			xsave;
		struct {
			struct fxregs_state	i387;
			struct xstate_header	header;
			struct arch_lbr_state	lbr;
		} __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
	};
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
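/*
 * Usage sketch (illustrative): quirks are registered from __init code
 * and run once at PMU initialization, e.g.
 *
 *	x86_add_quirk(intel_clovertown_quirk);
 *
 * The macro chains a static x86_pmu_quirk onto x86_pmu.quirks, so
 * registration is LIFO and needs no allocation.
 */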
/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA		0x20 /* deal with TSX force abort */
#define PMU_FL_PAIR		0x40 /* merge counters for large incr. events */
#define PMU_FL_INSTR_LATENCY	0x80 /* Support Instruction Latency in PEBS Memory Info Record */
#define PMU_FL_MEM_LOADS_AUX	0x100 /* Require an auxiliary event for the complete memory info */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};
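/*
 * Usage sketch (illustrative, mirroring typical call sites): exporting
 * a named event string through sysfs, e.g.
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * creates /sys/bus/event_source/devices/cpu/events/mem-loads containing
 * the given encoding string.
 */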
#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}

struct pmu *x86_get_pmu(unsigned int cpu);
extern struct x86_pmu x86_pmu __read_mostly;

static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{
	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
		return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;

	return &((struct x86_perf_task_context *)ctx)->opt;
}

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

bool check_hw_exists(struct pmu *pmu, int num_counters,
		     int num_counters_fixed);

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline bool is_counter_pair(struct hw_perf_event *hwc)
{
	return hwc->flags & PERF_X86_EVENT_PAIR;
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);

	/*
	 * Add enabled Merge event on next counter
	 * if large increment event being enabled on this counter
	 */
	if (is_counter_pair(hwc))
		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);

	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}
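/*
 * Usage sketch (illustrative): the common enable path ORs in the enable
 * bit of the event-select MSR,
 *
 *	__x86_pmu_enable_event(&event->hw, ARCH_PERFMON_EVENTSEL_ENABLE);
 *
 * while perf_ctr_virt_mask lets virtualization-aware code strip bits,
 * e.g. a host-only qualifier, from what is written to the hardware.
 */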
void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);

	if (is_counter_pair(hwc))
		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}
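/*
 * Note (illustrative): kernel_ip() above relies on the 64-bit address
 * split. Kernel linear addresses live in the upper canonical half, so
 * the sign test (long)ip < 0 classifies them without comparing against
 * an explicit boundary constant; 32-bit falls back to the PAGE_OFFSET
 * split.
 */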
void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);

	if (is_counter_pair(hwc))
		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically, segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address; there is
 * not much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}
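/*
 * Worked example (illustrative addresses): on 64-bit the kernel half of
 * the canonical address space has the top bit set, so kernel_ip() is a
 * plain sign test:
 *
 *	kernel_ip(0xffffffff81000000)	== true		((long)ip < 0)
 *	kernel_ip(0x00005612deadbeef)	== false
 *
 * A PEBS- or IBS-style overflow handler therefore only needs the linear
 * address to fake up a credible CS via set_linear_ip() before the regs
 * are handed to the unwinder.
 */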
ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			     char *page);

static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
{
	u64 intel_ctrl = hybrid(pmu, intel_ctrl);

	return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
}
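/*
 * Example (hypothetical intel_ctrl value): with three fixed counters
 * enabled, the hybrid intel_ctrl mask carries bits 32-34 set
 * (INTEL_PMC_IDX_FIXED == 32), so:
 *
 *	fixed_counter_disabled(2, pmu)	-> false	(intel_ctrl >> 34 != 0)
 *	fixed_counter_disabled(3, pmu)	-> true		(intel_ctrl >> 35 == 0)
 *
 * Note the shift deliberately tests "counter @i and everything above
 * it", not bit @i alone.
 */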
#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

static inline int is_pebs_pt(struct perf_event *event)
{
	return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
}

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return false;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	return hw_event == bts_event && period == 1;
}

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return intel_pmu_has_bts_period(event, hwc->sample_period);
}
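/*
 * Sketch with hypothetical attribute values: intel_pmu_has_bts() above
 * matches a fixed-period branch-instructions event with a period of
 * exactly 1, i.e. (with attr.freq == 0):
 *
 *	attr.type          = PERF_TYPE_HARDWARE;
 *	attr.config        = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
 *	attr.sample_period = 1;
 *
 * A frequency-based event (attr.freq == 1) bails out early regardless
 * of the current period.
 */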
int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

void release_lbr_buffers(void);

extern struct event_constraint bts_constraint;
extern struct event_constraint vlbr_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

extern struct event_constraint intel_icl_pebs_event_constraints[];

extern struct event_constraint intel_spr_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_auto_reload_read(struct perf_event *event);

void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);

void intel_ds_init(void);

void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
				 struct perf_event_context *next);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_reset_32(void);

void intel_pmu_lbr_reset_64(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);

void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);

void intel_pmu_lbr_save(void *ctx);

void intel_pmu_lbr_restore(void *ctx);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_arch_lbr_init(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline void release_lbr_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
	return 0;
}

static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */

#if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
int zhaoxin_pmu_init(void);
#else
static inline int zhaoxin_pmu_init(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN */