xref: /linux/arch/x86/events/perf_event.h (revision af6cf129706b2f79e12f97e62d977e7f653cdfd1)
127f6d22bSBorislav Petkov /*
227f6d22bSBorislav Petkov  * Performance events x86 architecture header
327f6d22bSBorislav Petkov  *
427f6d22bSBorislav Petkov  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
527f6d22bSBorislav Petkov  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
627f6d22bSBorislav Petkov  *  Copyright (C) 2009 Jaswinder Singh Rajput
727f6d22bSBorislav Petkov  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
827f6d22bSBorislav Petkov  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
927f6d22bSBorislav Petkov  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
1027f6d22bSBorislav Petkov  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
1127f6d22bSBorislav Petkov  *
1227f6d22bSBorislav Petkov  *  For licencing details see kernel-base/COPYING
1327f6d22bSBorislav Petkov  */
1427f6d22bSBorislav Petkov 
1527f6d22bSBorislav Petkov #include <linux/perf_event.h>
1627f6d22bSBorislav Petkov 
1710043e02SThomas Gleixner #include <asm/intel_ds.h>
1810043e02SThomas Gleixner 
1927f6d22bSBorislav Petkov /* To enable MSR tracing please use the generic trace points. */
2027f6d22bSBorislav Petkov 
2127f6d22bSBorislav Petkov /*
2227f6d22bSBorislav Petkov  *          |   NHM/WSM    |      SNB     |
2327f6d22bSBorislav Petkov  * register -------------------------------
2427f6d22bSBorislav Petkov  *          |  HT  | no HT |  HT  | no HT |
2527f6d22bSBorislav Petkov  *-----------------------------------------
2627f6d22bSBorislav Petkov  * offcore  | core | core  | cpu  | core  |
2727f6d22bSBorislav Petkov  * lbr_sel  | core | core  | cpu  | core  |
2827f6d22bSBorislav Petkov  * ld_lat   | cpu  | core  | cpu  | core  |
2927f6d22bSBorislav Petkov  *-----------------------------------------
3027f6d22bSBorislav Petkov  *
3127f6d22bSBorislav Petkov  * Given that there is a small number of shared regs,
3227f6d22bSBorislav Petkov  * we can pre-allocate their slot in the per-cpu
3327f6d22bSBorislav Petkov  * per-core reg tables.
3427f6d22bSBorislav Petkov  */
3527f6d22bSBorislav Petkov enum extra_reg_type {
3627f6d22bSBorislav Petkov 	EXTRA_REG_NONE  = -1,	/* not used */
3727f6d22bSBorislav Petkov 
3827f6d22bSBorislav Petkov 	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
3927f6d22bSBorislav Petkov 	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
4027f6d22bSBorislav Petkov 	EXTRA_REG_LBR   = 2,	/* lbr_select */
4127f6d22bSBorislav Petkov 	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
4227f6d22bSBorislav Petkov 	EXTRA_REG_FE    = 4,    /* fe_* */
4327f6d22bSBorislav Petkov 
4427f6d22bSBorislav Petkov 	EXTRA_REG_MAX		/* number of entries needed */
4527f6d22bSBorislav Petkov };
4627f6d22bSBorislav Petkov 
4727f6d22bSBorislav Petkov struct event_constraint {
4827f6d22bSBorislav Petkov 	union {
4927f6d22bSBorislav Petkov 		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
5027f6d22bSBorislav Petkov 		u64		idxmsk64;
5127f6d22bSBorislav Petkov 	};
5227f6d22bSBorislav Petkov 	u64		code;
5327f6d22bSBorislav Petkov 	u64		cmask;
5427f6d22bSBorislav Petkov 	int		weight;
5527f6d22bSBorislav Petkov 	int		overlap;
5627f6d22bSBorislav Petkov 	int		flags;
5763b79f6eSPeter Zijlstra 	unsigned int	size;
5827f6d22bSBorislav Petkov };
591f6a1e2dSPeter Zijlstra 
6063b79f6eSPeter Zijlstra static inline bool constraint_match(struct event_constraint *c, u64 ecode)
6163b79f6eSPeter Zijlstra {
6263b79f6eSPeter Zijlstra 	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
6363b79f6eSPeter Zijlstra }
6463b79f6eSPeter Zijlstra 
6527f6d22bSBorislav Petkov /*
6627f6d22bSBorislav Petkov  * struct hw_perf_event.flags flags
6727f6d22bSBorislav Petkov  */
6827f6d22bSBorislav Petkov #define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
6927f6d22bSBorislav Petkov #define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
7027f6d22bSBorislav Petkov #define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
711f6a1e2dSPeter Zijlstra #define PERF_X86_EVENT_PEBS_LD_HSW	0x0008 /* haswell style datala, load */
721f6a1e2dSPeter Zijlstra #define PERF_X86_EVENT_PEBS_NA_HSW	0x0010 /* haswell style datala, unknown */
731f6a1e2dSPeter Zijlstra #define PERF_X86_EVENT_EXCL		0x0020 /* HT exclusivity on counter */
741f6a1e2dSPeter Zijlstra #define PERF_X86_EVENT_DYNAMIC		0x0040 /* dynamic alloc'd constraint */
751f6a1e2dSPeter Zijlstra #define PERF_X86_EVENT_RDPMC_ALLOWED	0x0080 /* grant rdpmc permission */
761f6a1e2dSPeter Zijlstra #define PERF_X86_EVENT_EXCL_ACCT	0x0100 /* accounted EXCL event */
771f6a1e2dSPeter Zijlstra #define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
781f6a1e2dSPeter Zijlstra #define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */
7942880f72SAlexander Shishkin #define PERF_X86_EVENT_PEBS_VIA_PT	0x0800 /* use PT buffer for PEBS */
80471af006SKim Phillips #define PERF_X86_EVENT_PAIR		0x1000 /* Large Increment per Cycle */
81e1ad1ac2SLike Xu #define PERF_X86_EVENT_LBR_SELECT	0x2000 /* Save/Restore MSR_LBR_SELECT */
8227f6d22bSBorislav Petkov 
8327f6d22bSBorislav Petkov struct amd_nb {
8427f6d22bSBorislav Petkov 	int nb_id;  /* NorthBridge id */
8527f6d22bSBorislav Petkov 	int refcnt; /* reference count */
8627f6d22bSBorislav Petkov 	struct perf_event *owners[X86_PMC_IDX_MAX];
8727f6d22bSBorislav Petkov 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
8827f6d22bSBorislav Petkov };
8927f6d22bSBorislav Petkov 
90fd583ad1SKan Liang #define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
9142880f72SAlexander Shishkin #define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
9242880f72SAlexander Shishkin #define PEBS_OUTPUT_OFFSET	61
9342880f72SAlexander Shishkin #define PEBS_OUTPUT_MASK	(3ull << PEBS_OUTPUT_OFFSET)
9442880f72SAlexander Shishkin #define PEBS_OUTPUT_PT		(1ull << PEBS_OUTPUT_OFFSET)
9542880f72SAlexander Shishkin #define PEBS_VIA_PT_MASK	(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)
9627f6d22bSBorislav Petkov 
9727f6d22bSBorislav Petkov /*
9827f6d22bSBorislav Petkov  * Flags PEBS can handle without an PMI.
9927f6d22bSBorislav Petkov  *
10027f6d22bSBorislav Petkov  * TID can only be handled by flushing at context switch.
1012fe1bc1fSAndi Kleen  * REGS_USER can be handled for events limited to ring 3.
10227f6d22bSBorislav Petkov  *
10327f6d22bSBorislav Petkov  */
104174afc3eSKan Liang #define LARGE_PEBS_FLAGS \
10527f6d22bSBorislav Petkov 	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
10627f6d22bSBorislav Petkov 	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
10727f6d22bSBorislav Petkov 	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
1082fe1bc1fSAndi Kleen 	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
10911974914SJiri Olsa 	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
11011974914SJiri Olsa 	PERF_SAMPLE_PERIOD)
11127f6d22bSBorislav Petkov 
1129d5dcc93SKan Liang #define PEBS_GP_REGS			\
1139d5dcc93SKan Liang 	((1ULL << PERF_REG_X86_AX)    | \
1149d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_BX)    | \
1159d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_CX)    | \
1169d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_DX)    | \
1179d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_DI)    | \
1189d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_SI)    | \
1199d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_SP)    | \
1209d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_BP)    | \
1219d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_IP)    | \
1229d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_FLAGS) | \
1239d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_R8)    | \
1249d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_R9)    | \
1259d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_R10)   | \
1269d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_R11)   | \
1279d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_R12)   | \
1289d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_R13)   | \
1299d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_R14)   | \
1309d5dcc93SKan Liang 	 (1ULL << PERF_REG_X86_R15))
1312fe1bc1fSAndi Kleen 
13227f6d22bSBorislav Petkov /*
13327f6d22bSBorislav Petkov  * Per register state.
13427f6d22bSBorislav Petkov  */
13527f6d22bSBorislav Petkov struct er_account {
13627f6d22bSBorislav Petkov 	raw_spinlock_t      lock;	/* per-core: protect structure */
13727f6d22bSBorislav Petkov 	u64                 config;	/* extra MSR config */
13827f6d22bSBorislav Petkov 	u64                 reg;	/* extra MSR number */
13927f6d22bSBorislav Petkov 	atomic_t            ref;	/* reference count */
14027f6d22bSBorislav Petkov };
14127f6d22bSBorislav Petkov 
14227f6d22bSBorislav Petkov /*
14327f6d22bSBorislav Petkov  * Per core/cpu state
14427f6d22bSBorislav Petkov  *
14527f6d22bSBorislav Petkov  * Used to coordinate shared registers between HT threads or
14627f6d22bSBorislav Petkov  * among events on a single PMU.
14727f6d22bSBorislav Petkov  */
14827f6d22bSBorislav Petkov struct intel_shared_regs {
14927f6d22bSBorislav Petkov 	struct er_account       regs[EXTRA_REG_MAX];
15027f6d22bSBorislav Petkov 	int                     refcnt;		/* per-core: #HT threads */
15127f6d22bSBorislav Petkov 	unsigned                core_id;	/* per-core: core id */
15227f6d22bSBorislav Petkov };
15327f6d22bSBorislav Petkov 
15427f6d22bSBorislav Petkov enum intel_excl_state_type {
15527f6d22bSBorislav Petkov 	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
15627f6d22bSBorislav Petkov 	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
15727f6d22bSBorislav Petkov 	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
15827f6d22bSBorislav Petkov };
15927f6d22bSBorislav Petkov 
16027f6d22bSBorislav Petkov struct intel_excl_states {
16127f6d22bSBorislav Petkov 	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
16227f6d22bSBorislav Petkov 	bool sched_started; /* true if scheduling has started */
16327f6d22bSBorislav Petkov };
16427f6d22bSBorislav Petkov 
16527f6d22bSBorislav Petkov struct intel_excl_cntrs {
16627f6d22bSBorislav Petkov 	raw_spinlock_t	lock;
16727f6d22bSBorislav Petkov 
16827f6d22bSBorislav Petkov 	struct intel_excl_states states[2];
16927f6d22bSBorislav Petkov 
17027f6d22bSBorislav Petkov 	union {
17127f6d22bSBorislav Petkov 		u16	has_exclusive[2];
17227f6d22bSBorislav Petkov 		u32	exclusive_present;
17327f6d22bSBorislav Petkov 	};
17427f6d22bSBorislav Petkov 
17527f6d22bSBorislav Petkov 	int		refcnt;		/* per-core: #HT threads */
17627f6d22bSBorislav Petkov 	unsigned	core_id;	/* per-core: core id */
17727f6d22bSBorislav Petkov };
17827f6d22bSBorislav Petkov 
1798b077e4aSKan Liang struct x86_perf_task_context;
18027f6d22bSBorislav Petkov #define MAX_LBR_ENTRIES		32
18127f6d22bSBorislav Petkov 
18227f6d22bSBorislav Petkov enum {
1839f354a72SKan Liang 	LBR_FORMAT_32		= 0x00,
1849f354a72SKan Liang 	LBR_FORMAT_LIP		= 0x01,
1859f354a72SKan Liang 	LBR_FORMAT_EIP		= 0x02,
1869f354a72SKan Liang 	LBR_FORMAT_EIP_FLAGS	= 0x03,
1879f354a72SKan Liang 	LBR_FORMAT_EIP_FLAGS2	= 0x04,
1889f354a72SKan Liang 	LBR_FORMAT_INFO		= 0x05,
1899f354a72SKan Liang 	LBR_FORMAT_TIME		= 0x06,
1909f354a72SKan Liang 	LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_TIME,
1919f354a72SKan Liang };
1929f354a72SKan Liang 
1939f354a72SKan Liang enum {
19427f6d22bSBorislav Petkov 	X86_PERF_KFREE_SHARED = 0,
19527f6d22bSBorislav Petkov 	X86_PERF_KFREE_EXCL   = 1,
19627f6d22bSBorislav Petkov 	X86_PERF_KFREE_MAX
19727f6d22bSBorislav Petkov };
19827f6d22bSBorislav Petkov 
19927f6d22bSBorislav Petkov struct cpu_hw_events {
20027f6d22bSBorislav Petkov 	/*
20127f6d22bSBorislav Petkov 	 * Generic x86 PMC bits
20227f6d22bSBorislav Petkov 	 */
20327f6d22bSBorislav Petkov 	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
20427f6d22bSBorislav Petkov 	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
20527f6d22bSBorislav Petkov 	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
20627f6d22bSBorislav Petkov 	int			enabled;
20727f6d22bSBorislav Petkov 
20827f6d22bSBorislav Petkov 	int			n_events; /* the # of events in the below arrays */
20927f6d22bSBorislav Petkov 	int			n_added;  /* the # last events in the below arrays;
21027f6d22bSBorislav Petkov 					     they've never been enabled yet */
21127f6d22bSBorislav Petkov 	int			n_txn;    /* the # last events in the below arrays;
21227f6d22bSBorislav Petkov 					     added in the current transaction */
21327f6d22bSBorislav Petkov 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
21427f6d22bSBorislav Petkov 	u64			tags[X86_PMC_IDX_MAX];
21527f6d22bSBorislav Petkov 
21627f6d22bSBorislav Petkov 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
21727f6d22bSBorislav Petkov 	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];
21827f6d22bSBorislav Petkov 
21927f6d22bSBorislav Petkov 	int			n_excl; /* the number of exclusive events */
22027f6d22bSBorislav Petkov 
22127f6d22bSBorislav Petkov 	unsigned int		txn_flags;
22227f6d22bSBorislav Petkov 	int			is_fake;
22327f6d22bSBorislav Petkov 
22427f6d22bSBorislav Petkov 	/*
22527f6d22bSBorislav Petkov 	 * Intel DebugStore bits
22627f6d22bSBorislav Petkov 	 */
22727f6d22bSBorislav Petkov 	struct debug_store	*ds;
228c1961a46SHugh Dickins 	void			*ds_pebs_vaddr;
229c1961a46SHugh Dickins 	void			*ds_bts_vaddr;
23027f6d22bSBorislav Petkov 	u64			pebs_enabled;
23109e61b4fSPeter Zijlstra 	int			n_pebs;
23209e61b4fSPeter Zijlstra 	int			n_large_pebs;
23342880f72SAlexander Shishkin 	int			n_pebs_via_pt;
23442880f72SAlexander Shishkin 	int			pebs_output;
23527f6d22bSBorislav Petkov 
236c22497f5SKan Liang 	/* Current super set of events hardware configuration */
237c22497f5SKan Liang 	u64			pebs_data_cfg;
238c22497f5SKan Liang 	u64			active_pebs_data_cfg;
239c22497f5SKan Liang 	int			pebs_record_size;
240c22497f5SKan Liang 
24127f6d22bSBorislav Petkov 	/*
24227f6d22bSBorislav Petkov 	 * Intel LBR bits
24327f6d22bSBorislav Petkov 	 */
24427f6d22bSBorislav Petkov 	int				lbr_users;
245d3617b98SAndi Kleen 	int				lbr_pebs_users;
24627f6d22bSBorislav Petkov 	struct perf_branch_stack	lbr_stack;
24727f6d22bSBorislav Petkov 	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
24827f6d22bSBorislav Petkov 	struct er_account		*lbr_sel;
24927f6d22bSBorislav Petkov 	u64				br_sel;
250f42be865SKan Liang 	void				*last_task_ctx;
2518b077e4aSKan Liang 	int				last_log_id;
252e1ad1ac2SLike Xu 	int				lbr_select;
25327f6d22bSBorislav Petkov 
25427f6d22bSBorislav Petkov 	/*
25527f6d22bSBorislav Petkov 	 * Intel host/guest exclude bits
25627f6d22bSBorislav Petkov 	 */
25727f6d22bSBorislav Petkov 	u64				intel_ctrl_guest_mask;
25827f6d22bSBorislav Petkov 	u64				intel_ctrl_host_mask;
25927f6d22bSBorislav Petkov 	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];
26027f6d22bSBorislav Petkov 
26127f6d22bSBorislav Petkov 	/*
26227f6d22bSBorislav Petkov 	 * Intel checkpoint mask
26327f6d22bSBorislav Petkov 	 */
26427f6d22bSBorislav Petkov 	u64				intel_cp_status;
26527f6d22bSBorislav Petkov 
26627f6d22bSBorislav Petkov 	/*
26727f6d22bSBorislav Petkov 	 * manage shared (per-core, per-cpu) registers
26827f6d22bSBorislav Petkov 	 * used on Intel NHM/WSM/SNB
26927f6d22bSBorislav Petkov 	 */
27027f6d22bSBorislav Petkov 	struct intel_shared_regs	*shared_regs;
27127f6d22bSBorislav Petkov 	/*
27227f6d22bSBorislav Petkov 	 * manage exclusive counter access between hyperthread
27327f6d22bSBorislav Petkov 	 */
27427f6d22bSBorislav Petkov 	struct event_constraint *constraint_list; /* in enable order */
27527f6d22bSBorislav Petkov 	struct intel_excl_cntrs		*excl_cntrs;
27627f6d22bSBorislav Petkov 	int excl_thread_id; /* 0 or 1 */
27727f6d22bSBorislav Petkov 
27827f6d22bSBorislav Petkov 	/*
279400816f6SPeter Zijlstra (Intel) 	 * SKL TSX_FORCE_ABORT shadow
280400816f6SPeter Zijlstra (Intel) 	 */
281400816f6SPeter Zijlstra (Intel) 	u64				tfa_shadow;
282400816f6SPeter Zijlstra (Intel) 
283400816f6SPeter Zijlstra (Intel) 	/*
28427f6d22bSBorislav Petkov 	 * AMD specific bits
28527f6d22bSBorislav Petkov 	 */
28627f6d22bSBorislav Petkov 	struct amd_nb			*amd_nb;
28727f6d22bSBorislav Petkov 	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
28827f6d22bSBorislav Petkov 	u64				perf_ctr_virt_mask;
28957388912SKim Phillips 	int				n_pair; /* Large increment events */
29027f6d22bSBorislav Petkov 
29127f6d22bSBorislav Petkov 	void				*kfree_on_online[X86_PERF_KFREE_MAX];
29227f6d22bSBorislav Petkov };
29327f6d22bSBorislav Petkov 
29463b79f6eSPeter Zijlstra #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
29527f6d22bSBorislav Petkov 	{ .idxmsk64 = (n) },		\
29627f6d22bSBorislav Petkov 	.code = (c),			\
29763b79f6eSPeter Zijlstra 	.size = (e) - (c),		\
29827f6d22bSBorislav Petkov 	.cmask = (m),			\
29927f6d22bSBorislav Petkov 	.weight = (w),			\
30027f6d22bSBorislav Petkov 	.overlap = (o),			\
30127f6d22bSBorislav Petkov 	.flags = f,			\
30227f6d22bSBorislav Petkov }
30327f6d22bSBorislav Petkov 
30463b79f6eSPeter Zijlstra #define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
30563b79f6eSPeter Zijlstra 	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
30663b79f6eSPeter Zijlstra 
30727f6d22bSBorislav Petkov #define EVENT_CONSTRAINT(c, n, m)	\
30827f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
30927f6d22bSBorislav Petkov 
31063b79f6eSPeter Zijlstra /*
31163b79f6eSPeter Zijlstra  * The constraint_match() function only works for 'simple' event codes
31263b79f6eSPeter Zijlstra  * and not for extended (AMD64_EVENTSEL_EVENT) events codes.
31363b79f6eSPeter Zijlstra  */
31463b79f6eSPeter Zijlstra #define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
31563b79f6eSPeter Zijlstra 	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
31663b79f6eSPeter Zijlstra 
31727f6d22bSBorislav Petkov #define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
31827f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
31927f6d22bSBorislav Petkov 			   0, PERF_X86_EVENT_EXCL)
32027f6d22bSBorislav Petkov 
32127f6d22bSBorislav Petkov /*
32227f6d22bSBorislav Petkov  * The overlap flag marks event constraints with overlapping counter
32327f6d22bSBorislav Petkov  * masks. This is the case if the counter mask of such an event is not
32427f6d22bSBorislav Petkov  * a subset of any other counter mask of a constraint with an equal or
32527f6d22bSBorislav Petkov  * higher weight, e.g.:
32627f6d22bSBorislav Petkov  *
32727f6d22bSBorislav Petkov  *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
32827f6d22bSBorislav Petkov  *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
32927f6d22bSBorislav Petkov  *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
33027f6d22bSBorislav Petkov  *
33127f6d22bSBorislav Petkov  * The event scheduler may not select the correct counter in the first
33227f6d22bSBorislav Petkov  * cycle because it needs to know which subsequent events will be
33327f6d22bSBorislav Petkov  * scheduled. It may fail to schedule the events then. So we set the
33427f6d22bSBorislav Petkov  * overlap flag for such constraints to give the scheduler a hint which
33527f6d22bSBorislav Petkov  * events to select for counter rescheduling.
33627f6d22bSBorislav Petkov  *
33727f6d22bSBorislav Petkov  * Care must be taken as the rescheduling algorithm is O(n!) which
33800f52685SIngo Molnar  * will increase scheduling cycles for an over-committed system
33927f6d22bSBorislav Petkov  * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
34027f6d22bSBorislav Petkov  * and its counter masks must be kept at a minimum.
34127f6d22bSBorislav Petkov  */
34227f6d22bSBorislav Petkov #define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
34327f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
34427f6d22bSBorislav Petkov 
34527f6d22bSBorislav Petkov /*
34627f6d22bSBorislav Petkov  * Constraint on the Event code.
34727f6d22bSBorislav Petkov  */
34827f6d22bSBorislav Petkov #define INTEL_EVENT_CONSTRAINT(c, n)	\
34927f6d22bSBorislav Petkov 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
35027f6d22bSBorislav Petkov 
35127f6d22bSBorislav Petkov /*
35263b79f6eSPeter Zijlstra  * Constraint on a range of Event codes
35363b79f6eSPeter Zijlstra  */
35463b79f6eSPeter Zijlstra #define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
35563b79f6eSPeter Zijlstra 	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
35663b79f6eSPeter Zijlstra 
35763b79f6eSPeter Zijlstra /*
35827f6d22bSBorislav Petkov  * Constraint on the Event code + UMask + fixed-mask
35927f6d22bSBorislav Petkov  *
36027f6d22bSBorislav Petkov  * filter mask to validate fixed counter events.
36127f6d22bSBorislav Petkov  * the following filters disqualify for fixed counters:
36227f6d22bSBorislav Petkov  *  - inv
36327f6d22bSBorislav Petkov  *  - edge
36427f6d22bSBorislav Petkov  *  - cnt-mask
36527f6d22bSBorislav Petkov  *  - in_tx
36627f6d22bSBorislav Petkov  *  - in_tx_checkpointed
36727f6d22bSBorislav Petkov  *  The other filters are supported by fixed counters.
36827f6d22bSBorislav Petkov  *  The any-thread option is supported starting with v3.
36927f6d22bSBorislav Petkov  */
37027f6d22bSBorislav Petkov #define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
37127f6d22bSBorislav Petkov #define FIXED_EVENT_CONSTRAINT(c, n)	\
37227f6d22bSBorislav Petkov 	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
37327f6d22bSBorislav Petkov 
37427f6d22bSBorislav Petkov /*
37527f6d22bSBorislav Petkov  * Constraint on the Event code + UMask
37627f6d22bSBorislav Petkov  */
37727f6d22bSBorislav Petkov #define INTEL_UEVENT_CONSTRAINT(c, n)	\
37827f6d22bSBorislav Petkov 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
37927f6d22bSBorislav Petkov 
38027f6d22bSBorislav Petkov /* Constraint on specific umask bit only + event */
38127f6d22bSBorislav Petkov #define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
38227f6d22bSBorislav Petkov 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))
38327f6d22bSBorislav Petkov 
38427f6d22bSBorislav Petkov /* Like UEVENT_CONSTRAINT, but match flags too */
38527f6d22bSBorislav Petkov #define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
38627f6d22bSBorislav Petkov 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
38727f6d22bSBorislav Petkov 
38827f6d22bSBorislav Petkov #define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
38927f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
39027f6d22bSBorislav Petkov 			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)
39127f6d22bSBorislav Petkov 
39227f6d22bSBorislav Petkov #define INTEL_PLD_CONSTRAINT(c, n)	\
39327f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
39427f6d22bSBorislav Petkov 			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
39527f6d22bSBorislav Petkov 
39627f6d22bSBorislav Petkov #define INTEL_PST_CONSTRAINT(c, n)	\
39727f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
39827f6d22bSBorislav Petkov 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
39927f6d22bSBorislav Petkov 
40027f6d22bSBorislav Petkov /* Event constraint, but match on all event flags too. */
40127f6d22bSBorislav Petkov #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
4026b89d4c1SStephane Eranian 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
40327f6d22bSBorislav Petkov 
40463b79f6eSPeter Zijlstra #define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
4056b89d4c1SStephane Eranian 	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
40663b79f6eSPeter Zijlstra 
40727f6d22bSBorislav Petkov /* Check only flags, but allow all event/umask */
40827f6d22bSBorislav Petkov #define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
40927f6d22bSBorislav Petkov 	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
41027f6d22bSBorislav Petkov 
41127f6d22bSBorislav Petkov /* Check flags and event code, and set the HSW store flag */
41227f6d22bSBorislav Petkov #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
41327f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(code, n, 			\
41427f6d22bSBorislav Petkov 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
41527f6d22bSBorislav Petkov 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
41627f6d22bSBorislav Petkov 
41727f6d22bSBorislav Petkov /* Check flags and event code, and set the HSW load flag */
41827f6d22bSBorislav Petkov #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
41927f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(code, n,			\
42027f6d22bSBorislav Petkov 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
42127f6d22bSBorislav Petkov 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
42227f6d22bSBorislav Petkov 
42363b79f6eSPeter Zijlstra #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
42463b79f6eSPeter Zijlstra 	__EVENT_CONSTRAINT_RANGE(code, end, n,				\
42563b79f6eSPeter Zijlstra 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
42663b79f6eSPeter Zijlstra 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
42763b79f6eSPeter Zijlstra 
42827f6d22bSBorislav Petkov #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
42927f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(code, n,			\
43027f6d22bSBorislav Petkov 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
43127f6d22bSBorislav Petkov 			  HWEIGHT(n), 0, \
43227f6d22bSBorislav Petkov 			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
43327f6d22bSBorislav Petkov 
43427f6d22bSBorislav Petkov /* Check flags and event code/umask, and set the HSW store flag */
43527f6d22bSBorislav Petkov #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
43627f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(code, n, 			\
43727f6d22bSBorislav Petkov 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
43827f6d22bSBorislav Petkov 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
43927f6d22bSBorislav Petkov 
44027f6d22bSBorislav Petkov #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
44127f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(code, n,			\
44227f6d22bSBorislav Petkov 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
44327f6d22bSBorislav Petkov 			  HWEIGHT(n), 0, \
44427f6d22bSBorislav Petkov 			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)
44527f6d22bSBorislav Petkov 
44627f6d22bSBorislav Petkov /* Check flags and event code/umask, and set the HSW load flag */
44727f6d22bSBorislav Petkov #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
44827f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(code, n, 			\
44927f6d22bSBorislav Petkov 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
45027f6d22bSBorislav Petkov 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
45127f6d22bSBorislav Petkov 
45227f6d22bSBorislav Petkov #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
45327f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(code, n,			\
45427f6d22bSBorislav Petkov 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
45527f6d22bSBorislav Petkov 			  HWEIGHT(n), 0, \
45627f6d22bSBorislav Petkov 			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
45727f6d22bSBorislav Petkov 
45827f6d22bSBorislav Petkov /* Check flags and event code/umask, and set the HSW N/A flag */
45927f6d22bSBorislav Petkov #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
46027f6d22bSBorislav Petkov 	__EVENT_CONSTRAINT(code, n, 			\
46127f6d22bSBorislav Petkov 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
46227f6d22bSBorislav Petkov 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
46327f6d22bSBorislav Petkov 
46427f6d22bSBorislav Petkov 
46527f6d22bSBorislav Petkov /*
46627f6d22bSBorislav Petkov  * We define the end marker as having a weight of -1
46727f6d22bSBorislav Petkov  * to enable blacklisting of events using a counter bitmask
46827f6d22bSBorislav Petkov  * of zero and thus a weight of zero.
46927f6d22bSBorislav Petkov  * The end marker has a weight that cannot possibly be
47027f6d22bSBorislav Petkov  * obtained from counting the bits in the bitmask.
47127f6d22bSBorislav Petkov  */
47227f6d22bSBorislav Petkov #define EVENT_CONSTRAINT_END { .weight = -1 }
47327f6d22bSBorislav Petkov 
47427f6d22bSBorislav Petkov /*
47527f6d22bSBorislav Petkov  * Check for end marker with weight == -1
47627f6d22bSBorislav Petkov  */
47727f6d22bSBorislav Petkov #define for_each_event_constraint(e, c)	\
47827f6d22bSBorislav Petkov 	for ((e) = (c); (e)->weight != -1; (e)++)
47927f6d22bSBorislav Petkov 
48027f6d22bSBorislav Petkov /*
48127f6d22bSBorislav Petkov  * Extra registers for specific events.
48227f6d22bSBorislav Petkov  *
48327f6d22bSBorislav Petkov  * Some events need large masks and require external MSRs.
48427f6d22bSBorislav Petkov  * Those extra MSRs end up being shared for all events on
48527f6d22bSBorislav Petkov  * a PMU and sometimes between PMU of sibling HT threads.
48627f6d22bSBorislav Petkov  * In either case, the kernel needs to handle conflicting
48727f6d22bSBorislav Petkov  * accesses to those extra, shared, regs. The data structure
48827f6d22bSBorislav Petkov  * to manage those registers is stored in cpu_hw_event.
48927f6d22bSBorislav Petkov  */
49027f6d22bSBorislav Petkov struct extra_reg {
49127f6d22bSBorislav Petkov 	unsigned int		event;
49227f6d22bSBorislav Petkov 	unsigned int		msr;
49327f6d22bSBorislav Petkov 	u64			config_mask;
49427f6d22bSBorislav Petkov 	u64			valid_mask;
49527f6d22bSBorislav Petkov 	int			idx;  /* per_xxx->regs[] reg index */
49627f6d22bSBorislav Petkov 	bool			extra_msr_access;
49727f6d22bSBorislav Petkov };
49827f6d22bSBorislav Petkov 
49927f6d22bSBorislav Petkov #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
50027f6d22bSBorislav Petkov 	.event = (e),			\
50127f6d22bSBorislav Petkov 	.msr = (ms),			\
50227f6d22bSBorislav Petkov 	.config_mask = (m),		\
50327f6d22bSBorislav Petkov 	.valid_mask = (vm),		\
50427f6d22bSBorislav Petkov 	.idx = EXTRA_REG_##i,		\
50527f6d22bSBorislav Petkov 	.extra_msr_access = true,	\
50627f6d22bSBorislav Petkov 	}
50727f6d22bSBorislav Petkov 
50827f6d22bSBorislav Petkov #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
50927f6d22bSBorislav Petkov 	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
51027f6d22bSBorislav Petkov 
51127f6d22bSBorislav Petkov #define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
51227f6d22bSBorislav Petkov 	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
51327f6d22bSBorislav Petkov 			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)
51427f6d22bSBorislav Petkov 
51527f6d22bSBorislav Petkov #define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
51627f6d22bSBorislav Petkov 	INTEL_UEVENT_EXTRA_REG(c, \
51727f6d22bSBorislav Petkov 			       MSR_PEBS_LD_LAT_THRESHOLD, \
51827f6d22bSBorislav Petkov 			       0xffff, \
51927f6d22bSBorislav Petkov 			       LDLAT)
52027f6d22bSBorislav Petkov 
52127f6d22bSBorislav Petkov #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
52227f6d22bSBorislav Petkov 
52327f6d22bSBorislav Petkov union perf_capabilities {
52427f6d22bSBorislav Petkov 	struct {
52527f6d22bSBorislav Petkov 		u64	lbr_format:6;
52627f6d22bSBorislav Petkov 		u64	pebs_trap:1;
52727f6d22bSBorislav Petkov 		u64	pebs_arch_reg:1;
52827f6d22bSBorislav Petkov 		u64	pebs_format:4;
52927f6d22bSBorislav Petkov 		u64	smm_freeze:1;
53027f6d22bSBorislav Petkov 		/*
53127f6d22bSBorislav Petkov 		 * PMU supports separate counter range for writing
53227f6d22bSBorislav Petkov 		 * values > 32bit.
53327f6d22bSBorislav Petkov 		 */
53427f6d22bSBorislav Petkov 		u64	full_width_write:1;
535c22497f5SKan Liang 		u64     pebs_baseline:1;
53642880f72SAlexander Shishkin 		u64	pebs_metrics_available:1;
53742880f72SAlexander Shishkin 		u64	pebs_output_pt_available:1;
53827f6d22bSBorislav Petkov 	};
53927f6d22bSBorislav Petkov 	u64	capabilities;
54027f6d22bSBorislav Petkov };
54127f6d22bSBorislav Petkov 
54227f6d22bSBorislav Petkov struct x86_pmu_quirk {
54327f6d22bSBorislav Petkov 	struct x86_pmu_quirk *next;
54427f6d22bSBorislav Petkov 	void (*func)(void);
54527f6d22bSBorislav Petkov };
54627f6d22bSBorislav Petkov 
54727f6d22bSBorislav Petkov union x86_pmu_config {
54827f6d22bSBorislav Petkov 	struct {
54927f6d22bSBorislav Petkov 		u64 event:8,
55027f6d22bSBorislav Petkov 		    umask:8,
55127f6d22bSBorislav Petkov 		    usr:1,
55227f6d22bSBorislav Petkov 		    os:1,
55327f6d22bSBorislav Petkov 		    edge:1,
55427f6d22bSBorislav Petkov 		    pc:1,
55527f6d22bSBorislav Petkov 		    interrupt:1,
55627f6d22bSBorislav Petkov 		    __reserved1:1,
55727f6d22bSBorislav Petkov 		    en:1,
55827f6d22bSBorislav Petkov 		    inv:1,
55927f6d22bSBorislav Petkov 		    cmask:8,
56027f6d22bSBorislav Petkov 		    event2:4,
56127f6d22bSBorislav Petkov 		    __reserved2:4,
56227f6d22bSBorislav Petkov 		    go:1,
56327f6d22bSBorislav Petkov 		    ho:1;
56427f6d22bSBorislav Petkov 	} bits;
56527f6d22bSBorislav Petkov 	u64 value;
56627f6d22bSBorislav Petkov };
56727f6d22bSBorislav Petkov 
56827f6d22bSBorislav Petkov #define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
56927f6d22bSBorislav Petkov 
57027f6d22bSBorislav Petkov enum {
57127f6d22bSBorislav Petkov 	x86_lbr_exclusive_lbr,
57227f6d22bSBorislav Petkov 	x86_lbr_exclusive_bts,
57327f6d22bSBorislav Petkov 	x86_lbr_exclusive_pt,
57427f6d22bSBorislav Petkov 	x86_lbr_exclusive_max,
57527f6d22bSBorislav Petkov };
57627f6d22bSBorislav Petkov 
57727f6d22bSBorislav Petkov /*
57827f6d22bSBorislav Petkov  * struct x86_pmu - generic x86 pmu
57927f6d22bSBorislav Petkov  */
58027f6d22bSBorislav Petkov struct x86_pmu {
58127f6d22bSBorislav Petkov 	/*
58227f6d22bSBorislav Petkov 	 * Generic x86 PMC bits
58327f6d22bSBorislav Petkov 	 */
58427f6d22bSBorislav Petkov 	const char	*name;
58527f6d22bSBorislav Petkov 	int		version;
58627f6d22bSBorislav Petkov 	int		(*handle_irq)(struct pt_regs *);
58727f6d22bSBorislav Petkov 	void		(*disable_all)(void);
58827f6d22bSBorislav Petkov 	void		(*enable_all)(int added);
58927f6d22bSBorislav Petkov 	void		(*enable)(struct perf_event *);
59027f6d22bSBorislav Petkov 	void		(*disable)(struct perf_event *);
59168f7082fSPeter Zijlstra 	void		(*add)(struct perf_event *);
59268f7082fSPeter Zijlstra 	void		(*del)(struct perf_event *);
593bcfbe5c4SKan Liang 	void		(*read)(struct perf_event *event);
59427f6d22bSBorislav Petkov 	int		(*hw_config)(struct perf_event *event);
59527f6d22bSBorislav Petkov 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
59627f6d22bSBorislav Petkov 	unsigned	eventsel;
59727f6d22bSBorislav Petkov 	unsigned	perfctr;
59827f6d22bSBorislav Petkov 	int		(*addr_offset)(int index, bool eventsel);
59927f6d22bSBorislav Petkov 	int		(*rdpmc_index)(int index);
60027f6d22bSBorislav Petkov 	u64		(*event_map)(int);
60127f6d22bSBorislav Petkov 	int		max_events;
60227f6d22bSBorislav Petkov 	int		num_counters;
60327f6d22bSBorislav Petkov 	int		num_counters_fixed;
60427f6d22bSBorislav Petkov 	int		cntval_bits;
60527f6d22bSBorislav Petkov 	u64		cntval_mask;
60627f6d22bSBorislav Petkov 	union {
60727f6d22bSBorislav Petkov 			unsigned long events_maskl;
60827f6d22bSBorislav Petkov 			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
60927f6d22bSBorislav Petkov 	};
61027f6d22bSBorislav Petkov 	int		events_mask_len;
61127f6d22bSBorislav Petkov 	int		apic;
61227f6d22bSBorislav Petkov 	u64		max_period;
61327f6d22bSBorislav Petkov 	struct event_constraint *
61427f6d22bSBorislav Petkov 			(*get_event_constraints)(struct cpu_hw_events *cpuc,
61527f6d22bSBorislav Petkov 						 int idx,
61627f6d22bSBorislav Petkov 						 struct perf_event *event);
61727f6d22bSBorislav Petkov 
61827f6d22bSBorislav Petkov 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
61927f6d22bSBorislav Petkov 						 struct perf_event *event);
62027f6d22bSBorislav Petkov 
62127f6d22bSBorislav Petkov 	void		(*start_scheduling)(struct cpu_hw_events *cpuc);
62227f6d22bSBorislav Petkov 
62327f6d22bSBorislav Petkov 	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
62427f6d22bSBorislav Petkov 
62527f6d22bSBorislav Petkov 	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);
62627f6d22bSBorislav Petkov 
62727f6d22bSBorislav Petkov 	struct event_constraint *event_constraints;
62827f6d22bSBorislav Petkov 	struct x86_pmu_quirk *quirks;
62927f6d22bSBorislav Petkov 	int		perfctr_second_write;
630f605cfcaSKan Liang 	u64		(*limit_period)(struct perf_event *event, u64 l);
63127f6d22bSBorislav Petkov 
632af3bdb99SAndi Kleen 	/* PMI handler bits */
633af3bdb99SAndi Kleen 	unsigned int	late_ack		:1,
6343a4ac121SCodyYao-oc 			enabled_ack		:1,
635af3bdb99SAndi Kleen 			counter_freezing	:1;
63627f6d22bSBorislav Petkov 	/*
63727f6d22bSBorislav Petkov 	 * sysfs attrs
63827f6d22bSBorislav Petkov 	 */
63927f6d22bSBorislav Petkov 	int		attr_rdpmc_broken;
64027f6d22bSBorislav Petkov 	int		attr_rdpmc;
64127f6d22bSBorislav Petkov 	struct attribute **format_attrs;
64227f6d22bSBorislav Petkov 
64327f6d22bSBorislav Petkov 	ssize_t		(*events_sysfs_show)(char *page, u64 config);
644baa0c833SJiri Olsa 	const struct attribute_group **attr_update;
64527f6d22bSBorislav Petkov 
6466089327fSKan Liang 	unsigned long	attr_freeze_on_smi;
6476089327fSKan Liang 
64827f6d22bSBorislav Petkov 	/*
64927f6d22bSBorislav Petkov 	 * CPU Hotplug hooks
65027f6d22bSBorislav Petkov 	 */
65127f6d22bSBorislav Petkov 	int		(*cpu_prepare)(int cpu);
65227f6d22bSBorislav Petkov 	void		(*cpu_starting)(int cpu);
65327f6d22bSBorislav Petkov 	void		(*cpu_dying)(int cpu);
65427f6d22bSBorislav Petkov 	void		(*cpu_dead)(int cpu);
65527f6d22bSBorislav Petkov 
65627f6d22bSBorislav Petkov 	void		(*check_microcode)(void);
65727f6d22bSBorislav Petkov 	void		(*sched_task)(struct perf_event_context *ctx,
65827f6d22bSBorislav Petkov 				      bool sched_in);
65927f6d22bSBorislav Petkov 
66027f6d22bSBorislav Petkov 	/*
66127f6d22bSBorislav Petkov 	 * Intel Arch Perfmon v2+
66227f6d22bSBorislav Petkov 	 */
66327f6d22bSBorislav Petkov 	u64			intel_ctrl;
66427f6d22bSBorislav Petkov 	union perf_capabilities intel_cap;
66527f6d22bSBorislav Petkov 
66627f6d22bSBorislav Petkov 	/*
66727f6d22bSBorislav Petkov 	 * Intel DebugStore bits
66827f6d22bSBorislav Petkov 	 */
66927f6d22bSBorislav Petkov 	unsigned int	bts			:1,
67027f6d22bSBorislav Petkov 			bts_active		:1,
67127f6d22bSBorislav Petkov 			pebs			:1,
67227f6d22bSBorislav Petkov 			pebs_active		:1,
67327f6d22bSBorislav Petkov 			pebs_broken		:1,
67495298355SAndi Kleen 			pebs_prec_dist		:1,
6759b545c04SAndi Kleen 			pebs_no_tlb		:1,
676cd6b984fSKan Liang 			pebs_no_isolation	:1;
67727f6d22bSBorislav Petkov 	int		pebs_record_size;
678e72daf3fSJiri Olsa 	int		pebs_buffer_size;
679c22497f5SKan Liang 	int		max_pebs_events;
68027f6d22bSBorislav Petkov 	void		(*drain_pebs)(struct pt_regs *regs);
68127f6d22bSBorislav Petkov 	struct event_constraint *pebs_constraints;
68227f6d22bSBorislav Petkov 	void		(*pebs_aliases)(struct perf_event *event);
683174afc3eSKan Liang 	unsigned long	large_pebs_flags;
684c22497f5SKan Liang 	u64		rtm_abort_event;
68527f6d22bSBorislav Petkov 
68627f6d22bSBorislav Petkov 	/*
68727f6d22bSBorislav Petkov 	 * Intel LBR
68827f6d22bSBorislav Petkov 	 */
6893cb9d546SWei Wang 	unsigned int	lbr_tos, lbr_from, lbr_to,
6903cb9d546SWei Wang 			lbr_nr;			   /* LBR base regs and size */
69127f6d22bSBorislav Petkov 	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
69227f6d22bSBorislav Petkov 	const int	*lbr_sel_map;		   /* lbr_select mappings */
69327f6d22bSBorislav Petkov 	bool		lbr_double_abort;	   /* duplicated lbr aborts */
694b0c1ef52SAndi Kleen 	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */
69527f6d22bSBorislav Petkov 
696*af6cf129SKan Liang 	/*
697*af6cf129SKan Liang 	 * Intel Architectural LBR CPUID Enumeration
698*af6cf129SKan Liang 	 */
699*af6cf129SKan Liang 	unsigned int	lbr_depth_mask:8;
700*af6cf129SKan Liang 	unsigned int	lbr_deep_c_reset:1;
701*af6cf129SKan Liang 	unsigned int	lbr_lip:1;
702*af6cf129SKan Liang 	unsigned int	lbr_cpl:1;
703*af6cf129SKan Liang 	unsigned int	lbr_filter:1;
704*af6cf129SKan Liang 	unsigned int	lbr_call_stack:1;
705*af6cf129SKan Liang 	unsigned int	lbr_mispred:1;
706*af6cf129SKan Liang 	unsigned int	lbr_timed_lbr:1;
707*af6cf129SKan Liang 	unsigned int	lbr_br_type:1;
708*af6cf129SKan Liang 
7099f354a72SKan Liang 	void		(*lbr_reset)(void);
710c301b1d8SKan Liang 	void		(*lbr_read)(struct cpu_hw_events *cpuc);
711799571bfSKan Liang 	void		(*lbr_save)(void *ctx);
712799571bfSKan Liang 	void		(*lbr_restore)(void *ctx);
7139f354a72SKan Liang 
71427f6d22bSBorislav Petkov 	/*
71527f6d22bSBorislav Petkov 	 * Intel PT/LBR/BTS are exclusive
71627f6d22bSBorislav Petkov 	 */
71727f6d22bSBorislav Petkov 	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];
71827f6d22bSBorislav Petkov 
71927f6d22bSBorislav Petkov 	/*
720fc1adfe3SAlexey Budankov 	 * perf task context (i.e. struct perf_event_context::task_ctx_data)
721fc1adfe3SAlexey Budankov 	 * switch helper to bridge calls from perf/core to perf/x86.
722fc1adfe3SAlexey Budankov 	 * See struct pmu::swap_task_ctx() usage for examples;
723fc1adfe3SAlexey Budankov 	 */
724fc1adfe3SAlexey Budankov 	void		(*swap_task_ctx)(struct perf_event_context *prev,
725fc1adfe3SAlexey Budankov 					 struct perf_event_context *next);
726fc1adfe3SAlexey Budankov 
727fc1adfe3SAlexey Budankov 	/*
72832b62f44SPeter Zijlstra 	 * AMD bits
72932b62f44SPeter Zijlstra 	 */
73032b62f44SPeter Zijlstra 	unsigned int	amd_nb_constraints : 1;
73157388912SKim Phillips 	u64		perf_ctr_pair_en;
73232b62f44SPeter Zijlstra 
73332b62f44SPeter Zijlstra 	/*
73427f6d22bSBorislav Petkov 	 * Extra registers for events
73527f6d22bSBorislav Petkov 	 */
73627f6d22bSBorislav Petkov 	struct extra_reg *extra_regs;
73727f6d22bSBorislav Petkov 	unsigned int flags;
73827f6d22bSBorislav Petkov 
73927f6d22bSBorislav Petkov 	/*
74027f6d22bSBorislav Petkov 	 * Intel host/guest support (KVM)
74127f6d22bSBorislav Petkov 	 */
74227f6d22bSBorislav Petkov 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
74381ec3f3cSJiri Olsa 
74481ec3f3cSJiri Olsa 	/*
74581ec3f3cSJiri Olsa 	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
74681ec3f3cSJiri Olsa 	 */
74781ec3f3cSJiri Olsa 	int (*check_period) (struct perf_event *event, u64 period);
74842880f72SAlexander Shishkin 
74942880f72SAlexander Shishkin 	int (*aux_output_match) (struct perf_event *event);
75027f6d22bSBorislav Petkov };
75127f6d22bSBorislav Petkov 
752530bfff6SKan Liang struct x86_perf_task_context_opt {
753530bfff6SKan Liang 	int lbr_callstack_users;
754530bfff6SKan Liang 	int lbr_stack_state;
755530bfff6SKan Liang 	int log_id;
756530bfff6SKan Liang };
757530bfff6SKan Liang 
75827f6d22bSBorislav Petkov struct x86_perf_task_context {
75927f6d22bSBorislav Petkov 	u64 lbr_from[MAX_LBR_ENTRIES];
76027f6d22bSBorislav Petkov 	u64 lbr_to[MAX_LBR_ENTRIES];
76127f6d22bSBorislav Petkov 	u64 lbr_info[MAX_LBR_ENTRIES];
762e1ad1ac2SLike Xu 	u64 lbr_sel;
76327f6d22bSBorislav Petkov 	int tos;
7640592e57bSKan Liang 	int valid_lbrs;
765530bfff6SKan Liang 	struct x86_perf_task_context_opt opt;
76627f6d22bSBorislav Petkov };
76727f6d22bSBorislav Petkov 
76827f6d22bSBorislav Petkov #define x86_add_quirk(func_)						\
76927f6d22bSBorislav Petkov do {									\
77027f6d22bSBorislav Petkov 	static struct x86_pmu_quirk __quirk __initdata = {		\
77127f6d22bSBorislav Petkov 		.func = func_,						\
77227f6d22bSBorislav Petkov 	};								\
77327f6d22bSBorislav Petkov 	__quirk.next = x86_pmu.quirks;					\
77427f6d22bSBorislav Petkov 	x86_pmu.quirks = &__quirk;					\
77527f6d22bSBorislav Petkov } while (0)
77627f6d22bSBorislav Petkov 
77727f6d22bSBorislav Petkov /*
77827f6d22bSBorislav Petkov  * x86_pmu flags
77927f6d22bSBorislav Petkov  */
78027f6d22bSBorislav Petkov #define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
78127f6d22bSBorislav Petkov #define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
78227f6d22bSBorislav Petkov #define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
78327f6d22bSBorislav Petkov #define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
78431962340SKan Liang #define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
785400816f6SPeter Zijlstra (Intel) #define PMU_FL_TFA		0x20 /* deal with TSX force abort */
786471af006SKim Phillips #define PMU_FL_PAIR		0x40 /* merge counters for large incr. events */
78727f6d22bSBorislav Petkov 
78827f6d22bSBorislav Petkov #define EVENT_VAR(_id)  event_attr_##_id
78927f6d22bSBorislav Petkov #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
79027f6d22bSBorislav Petkov 
79127f6d22bSBorislav Petkov #define EVENT_ATTR(_name, _id)						\
79227f6d22bSBorislav Petkov static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
79327f6d22bSBorislav Petkov 	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
79427f6d22bSBorislav Petkov 	.id		= PERF_COUNT_HW_##_id,				\
79527f6d22bSBorislav Petkov 	.event_str	= NULL,						\
79627f6d22bSBorislav Petkov };
79727f6d22bSBorislav Petkov 
79827f6d22bSBorislav Petkov #define EVENT_ATTR_STR(_name, v, str)					\
79927f6d22bSBorislav Petkov static struct perf_pmu_events_attr event_attr_##v = {			\
80027f6d22bSBorislav Petkov 	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
80127f6d22bSBorislav Petkov 	.id		= 0,						\
80227f6d22bSBorislav Petkov 	.event_str	= str,						\
80327f6d22bSBorislav Petkov };
80427f6d22bSBorislav Petkov 
805fc07e9f9SAndi Kleen #define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
806fc07e9f9SAndi Kleen static struct perf_pmu_events_ht_attr event_attr_##v = {		\
807fc07e9f9SAndi Kleen 	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
808fc07e9f9SAndi Kleen 	.id		= 0,						\
809fc07e9f9SAndi Kleen 	.event_str_noht	= noht,						\
810fc07e9f9SAndi Kleen 	.event_str_ht	= ht,						\
811fc07e9f9SAndi Kleen }
812fc07e9f9SAndi Kleen 
813f447e4ebSStephane Eranian struct pmu *x86_get_pmu(void);
81427f6d22bSBorislav Petkov extern struct x86_pmu x86_pmu __read_mostly;
81527f6d22bSBorislav Petkov 
816f42be865SKan Liang static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
817f42be865SKan Liang {
818f42be865SKan Liang 	return &((struct x86_perf_task_context *)ctx)->opt;
819f42be865SKan Liang }
820f42be865SKan Liang 
82127f6d22bSBorislav Petkov static inline bool x86_pmu_has_lbr_callstack(void)
82227f6d22bSBorislav Petkov {
82327f6d22bSBorislav Petkov 	return  x86_pmu.lbr_sel_map &&
82427f6d22bSBorislav Petkov 		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
82527f6d22bSBorislav Petkov }
82627f6d22bSBorislav Petkov 
82727f6d22bSBorislav Petkov DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
82827f6d22bSBorislav Petkov 
82927f6d22bSBorislav Petkov int x86_perf_event_set_period(struct perf_event *event);
83027f6d22bSBorislav Petkov 
83127f6d22bSBorislav Petkov /*
83227f6d22bSBorislav Petkov  * Generalized hw caching related hw_event table, filled
83327f6d22bSBorislav Petkov  * in on a per model basis. A value of 0 means
83427f6d22bSBorislav Petkov  * 'not supported', -1 means 'hw_event makes no sense on
83527f6d22bSBorislav Petkov  * this CPU', any other value means the raw hw_event
83627f6d22bSBorislav Petkov  * ID.
83727f6d22bSBorislav Petkov  */
83827f6d22bSBorislav Petkov 
83927f6d22bSBorislav Petkov #define C(x) PERF_COUNT_HW_CACHE_##x
84027f6d22bSBorislav Petkov 
84127f6d22bSBorislav Petkov extern u64 __read_mostly hw_cache_event_ids
84227f6d22bSBorislav Petkov 				[PERF_COUNT_HW_CACHE_MAX]
84327f6d22bSBorislav Petkov 				[PERF_COUNT_HW_CACHE_OP_MAX]
84427f6d22bSBorislav Petkov 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
84527f6d22bSBorislav Petkov extern u64 __read_mostly hw_cache_extra_regs
84627f6d22bSBorislav Petkov 				[PERF_COUNT_HW_CACHE_MAX]
84727f6d22bSBorislav Petkov 				[PERF_COUNT_HW_CACHE_OP_MAX]
84827f6d22bSBorislav Petkov 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
84927f6d22bSBorislav Petkov 
85027f6d22bSBorislav Petkov u64 x86_perf_event_update(struct perf_event *event);
85127f6d22bSBorislav Petkov 
85227f6d22bSBorislav Petkov static inline unsigned int x86_pmu_config_addr(int index)
85327f6d22bSBorislav Petkov {
85427f6d22bSBorislav Petkov 	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
85527f6d22bSBorislav Petkov 				   x86_pmu.addr_offset(index, true) : index);
85627f6d22bSBorislav Petkov }
85727f6d22bSBorislav Petkov 
85827f6d22bSBorislav Petkov static inline unsigned int x86_pmu_event_addr(int index)
85927f6d22bSBorislav Petkov {
86027f6d22bSBorislav Petkov 	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
86127f6d22bSBorislav Petkov 				  x86_pmu.addr_offset(index, false) : index);
86227f6d22bSBorislav Petkov }
86327f6d22bSBorislav Petkov 
86427f6d22bSBorislav Petkov static inline int x86_pmu_rdpmc_index(int index)
86527f6d22bSBorislav Petkov {
86627f6d22bSBorislav Petkov 	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
86727f6d22bSBorislav Petkov }
86827f6d22bSBorislav Petkov 
86927f6d22bSBorislav Petkov int x86_add_exclusive(unsigned int what);
87027f6d22bSBorislav Petkov 
87127f6d22bSBorislav Petkov void x86_del_exclusive(unsigned int what);
87227f6d22bSBorislav Petkov 
87327f6d22bSBorislav Petkov int x86_reserve_hardware(void);
87427f6d22bSBorislav Petkov 
87527f6d22bSBorislav Petkov void x86_release_hardware(void);
87627f6d22bSBorislav Petkov 
877b00233b5SAndi Kleen int x86_pmu_max_precise(void);
878b00233b5SAndi Kleen 
87927f6d22bSBorislav Petkov void hw_perf_lbr_event_destroy(struct perf_event *event);
88027f6d22bSBorislav Petkov 
88127f6d22bSBorislav Petkov int x86_setup_perfctr(struct perf_event *event);
88227f6d22bSBorislav Petkov 
88327f6d22bSBorislav Petkov int x86_pmu_hw_config(struct perf_event *event);
88427f6d22bSBorislav Petkov 
88527f6d22bSBorislav Petkov void x86_pmu_disable_all(void);
88627f6d22bSBorislav Petkov 
88757388912SKim Phillips static inline bool is_counter_pair(struct hw_perf_event *hwc)
88857388912SKim Phillips {
88957388912SKim Phillips 	return hwc->flags & PERF_X86_EVENT_PAIR;
89057388912SKim Phillips }
89157388912SKim Phillips 
89227f6d22bSBorislav Petkov static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
89327f6d22bSBorislav Petkov 					  u64 enable_mask)
89427f6d22bSBorislav Petkov {
89527f6d22bSBorislav Petkov 	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
89627f6d22bSBorislav Petkov 
89727f6d22bSBorislav Petkov 	if (hwc->extra_reg.reg)
89827f6d22bSBorislav Petkov 		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
89957388912SKim Phillips 
90057388912SKim Phillips 	/*
90157388912SKim Phillips 	 * Add enabled Merge event on next counter
90257388912SKim Phillips 	 * if large increment event being enabled on this counter
90357388912SKim Phillips 	 */
90457388912SKim Phillips 	if (is_counter_pair(hwc))
90557388912SKim Phillips 		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
90657388912SKim Phillips 
90727f6d22bSBorislav Petkov 	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
90827f6d22bSBorislav Petkov }
90927f6d22bSBorislav Petkov 
91027f6d22bSBorislav Petkov void x86_pmu_enable_all(int added);
91127f6d22bSBorislav Petkov 
91227f6d22bSBorislav Petkov int perf_assign_events(struct event_constraint **constraints, int n,
91327f6d22bSBorislav Petkov 			int wmin, int wmax, int gpmax, int *assign);
91427f6d22bSBorislav Petkov int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
91527f6d22bSBorislav Petkov 
91627f6d22bSBorislav Petkov void x86_pmu_stop(struct perf_event *event, int flags);
91727f6d22bSBorislav Petkov 
91827f6d22bSBorislav Petkov static inline void x86_pmu_disable_event(struct perf_event *event)
91927f6d22bSBorislav Petkov {
92027f6d22bSBorislav Petkov 	struct hw_perf_event *hwc = &event->hw;
92127f6d22bSBorislav Petkov 
92227f6d22bSBorislav Petkov 	wrmsrl(hwc->config_base, hwc->config);
92357388912SKim Phillips 
92457388912SKim Phillips 	if (is_counter_pair(hwc))
92557388912SKim Phillips 		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
92627f6d22bSBorislav Petkov }
92727f6d22bSBorislav Petkov 
92827f6d22bSBorislav Petkov void x86_pmu_enable_event(struct perf_event *event);
92927f6d22bSBorislav Petkov 
93027f6d22bSBorislav Petkov int x86_pmu_handle_irq(struct pt_regs *regs);
93127f6d22bSBorislav Petkov 
93227f6d22bSBorislav Petkov extern struct event_constraint emptyconstraint;
93327f6d22bSBorislav Petkov 
93427f6d22bSBorislav Petkov extern struct event_constraint unconstrained;
93527f6d22bSBorislav Petkov 
93627f6d22bSBorislav Petkov static inline bool kernel_ip(unsigned long ip)
93727f6d22bSBorislav Petkov {
93827f6d22bSBorislav Petkov #ifdef CONFIG_X86_32
93927f6d22bSBorislav Petkov 	return ip > PAGE_OFFSET;
94027f6d22bSBorislav Petkov #else
94127f6d22bSBorislav Petkov 	return (long)ip < 0;
94227f6d22bSBorislav Petkov #endif
94327f6d22bSBorislav Petkov }
94427f6d22bSBorislav Petkov 
94527f6d22bSBorislav Petkov /*
94627f6d22bSBorislav Petkov  * Not all PMUs provide the right context information to place the reported IP
94727f6d22bSBorislav Petkov  * into full context. Specifically segment registers are typically not
94827f6d22bSBorislav Petkov  * supplied.
94927f6d22bSBorislav Petkov  *
95027f6d22bSBorislav Petkov  * Assuming the address is a linear address (it is for IBS), we fake the CS and
95127f6d22bSBorislav Petkov  * vm86 mode using the known zero-based code segment and 'fix up' the registers
95227f6d22bSBorislav Petkov  * to reflect this.
95327f6d22bSBorislav Petkov  *
95427f6d22bSBorislav Petkov  * Intel PEBS/LBR appear to typically provide the effective address, nothing
95527f6d22bSBorislav Petkov  * much we can do about that but pray and treat it like a linear address.
95627f6d22bSBorislav Petkov  */
95727f6d22bSBorislav Petkov static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
95827f6d22bSBorislav Petkov {
95927f6d22bSBorislav Petkov 	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
96027f6d22bSBorislav Petkov 	if (regs->flags & X86_VM_MASK)
96127f6d22bSBorislav Petkov 		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
96227f6d22bSBorislav Petkov 	regs->ip = ip;
96327f6d22bSBorislav Petkov }
96427f6d22bSBorislav Petkov 
96527f6d22bSBorislav Petkov ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
96627f6d22bSBorislav Petkov ssize_t intel_event_sysfs_show(char *page, u64 config);
96727f6d22bSBorislav Petkov 
968a49ac9f8SHuang Rui ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
969a49ac9f8SHuang Rui 			  char *page);
970fc07e9f9SAndi Kleen ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
971fc07e9f9SAndi Kleen 			  char *page);
972a49ac9f8SHuang Rui 
97327f6d22bSBorislav Petkov #ifdef CONFIG_CPU_SUP_AMD
97427f6d22bSBorislav Petkov 
97527f6d22bSBorislav Petkov int amd_pmu_init(void);
97627f6d22bSBorislav Petkov 
97727f6d22bSBorislav Petkov #else /* CONFIG_CPU_SUP_AMD */
97827f6d22bSBorislav Petkov 
97927f6d22bSBorislav Petkov static inline int amd_pmu_init(void)
98027f6d22bSBorislav Petkov {
98127f6d22bSBorislav Petkov 	return 0;
98227f6d22bSBorislav Petkov }
98327f6d22bSBorislav Petkov 
98427f6d22bSBorislav Petkov #endif /* CONFIG_CPU_SUP_AMD */
98527f6d22bSBorislav Petkov 
98642880f72SAlexander Shishkin static inline int is_pebs_pt(struct perf_event *event)
98742880f72SAlexander Shishkin {
98842880f72SAlexander Shishkin 	return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
98942880f72SAlexander Shishkin }
99042880f72SAlexander Shishkin 
99127f6d22bSBorislav Petkov #ifdef CONFIG_CPU_SUP_INTEL
99227f6d22bSBorislav Petkov 
99381ec3f3cSJiri Olsa static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
99427f6d22bSBorislav Petkov {
99567266c10SJiri Olsa 	struct hw_perf_event *hwc = &event->hw;
99667266c10SJiri Olsa 	unsigned int hw_event, bts_event;
99727f6d22bSBorislav Petkov 
99867266c10SJiri Olsa 	if (event->attr.freq)
99927f6d22bSBorislav Petkov 		return false;
100067266c10SJiri Olsa 
100167266c10SJiri Olsa 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
100267266c10SJiri Olsa 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
100367266c10SJiri Olsa 
100481ec3f3cSJiri Olsa 	return hw_event == bts_event && period == 1;
100581ec3f3cSJiri Olsa }
100681ec3f3cSJiri Olsa 
100781ec3f3cSJiri Olsa static inline bool intel_pmu_has_bts(struct perf_event *event)
100881ec3f3cSJiri Olsa {
100981ec3f3cSJiri Olsa 	struct hw_perf_event *hwc = &event->hw;
101081ec3f3cSJiri Olsa 
101181ec3f3cSJiri Olsa 	return intel_pmu_has_bts_period(event, hwc->sample_period);
101227f6d22bSBorislav Petkov }
101327f6d22bSBorislav Petkov 
101427f6d22bSBorislav Petkov int intel_pmu_save_and_restart(struct perf_event *event);
101527f6d22bSBorislav Petkov 
101627f6d22bSBorislav Petkov struct event_constraint *
101727f6d22bSBorislav Petkov x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
101827f6d22bSBorislav Petkov 			  struct perf_event *event);
101927f6d22bSBorislav Petkov 
1020d01b1f96SPeter Zijlstra (Intel) extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
1021d01b1f96SPeter Zijlstra (Intel) extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
102227f6d22bSBorislav Petkov 
102327f6d22bSBorislav Petkov int intel_pmu_init(void);
102427f6d22bSBorislav Petkov 
102527f6d22bSBorislav Petkov void init_debug_store_on_cpu(int cpu);
102627f6d22bSBorislav Petkov 
102727f6d22bSBorislav Petkov void fini_debug_store_on_cpu(int cpu);
102827f6d22bSBorislav Petkov 
102927f6d22bSBorislav Petkov void release_ds_buffers(void);
103027f6d22bSBorislav Petkov 
103127f6d22bSBorislav Petkov void reserve_ds_buffers(void);
103227f6d22bSBorislav Petkov 
103327f6d22bSBorislav Petkov extern struct event_constraint bts_constraint;
1034097e4311SLike Xu extern struct event_constraint vlbr_constraint;
103527f6d22bSBorislav Petkov 
103627f6d22bSBorislav Petkov void intel_pmu_enable_bts(u64 config);
103727f6d22bSBorislav Petkov 
103827f6d22bSBorislav Petkov void intel_pmu_disable_bts(void);
103927f6d22bSBorislav Petkov 
104027f6d22bSBorislav Petkov int intel_pmu_drain_bts_buffer(void);
104127f6d22bSBorislav Petkov 
104227f6d22bSBorislav Petkov extern struct event_constraint intel_core2_pebs_event_constraints[];
104327f6d22bSBorislav Petkov 
104427f6d22bSBorislav Petkov extern struct event_constraint intel_atom_pebs_event_constraints[];
104527f6d22bSBorislav Petkov 
104627f6d22bSBorislav Petkov extern struct event_constraint intel_slm_pebs_event_constraints[];
104727f6d22bSBorislav Petkov 
10488b92c3a7SKan Liang extern struct event_constraint intel_glm_pebs_event_constraints[];
10498b92c3a7SKan Liang 
1050dd0b06b5SKan Liang extern struct event_constraint intel_glp_pebs_event_constraints[];
1051dd0b06b5SKan Liang 
105227f6d22bSBorislav Petkov extern struct event_constraint intel_nehalem_pebs_event_constraints[];
105327f6d22bSBorislav Petkov 
105427f6d22bSBorislav Petkov extern struct event_constraint intel_westmere_pebs_event_constraints[];
105527f6d22bSBorislav Petkov 
105627f6d22bSBorislav Petkov extern struct event_constraint intel_snb_pebs_event_constraints[];
105727f6d22bSBorislav Petkov 
105827f6d22bSBorislav Petkov extern struct event_constraint intel_ivb_pebs_event_constraints[];
105927f6d22bSBorislav Petkov 
106027f6d22bSBorislav Petkov extern struct event_constraint intel_hsw_pebs_event_constraints[];
106127f6d22bSBorislav Petkov 
1062b3e62463SStephane Eranian extern struct event_constraint intel_bdw_pebs_event_constraints[];
1063b3e62463SStephane Eranian 
106427f6d22bSBorislav Petkov extern struct event_constraint intel_skl_pebs_event_constraints[];
106527f6d22bSBorislav Petkov 
106660176089SKan Liang extern struct event_constraint intel_icl_pebs_event_constraints[];
106760176089SKan Liang 
106827f6d22bSBorislav Petkov struct event_constraint *intel_pebs_constraints(struct perf_event *event);
106927f6d22bSBorislav Petkov 
107068f7082fSPeter Zijlstra void intel_pmu_pebs_add(struct perf_event *event);
107168f7082fSPeter Zijlstra 
107268f7082fSPeter Zijlstra void intel_pmu_pebs_del(struct perf_event *event);
107368f7082fSPeter Zijlstra 
107427f6d22bSBorislav Petkov void intel_pmu_pebs_enable(struct perf_event *event);
107527f6d22bSBorislav Petkov 
107627f6d22bSBorislav Petkov void intel_pmu_pebs_disable(struct perf_event *event);
107727f6d22bSBorislav Petkov 
107827f6d22bSBorislav Petkov void intel_pmu_pebs_enable_all(void);
107927f6d22bSBorislav Petkov 
108027f6d22bSBorislav Petkov void intel_pmu_pebs_disable_all(void);
108127f6d22bSBorislav Petkov 
108227f6d22bSBorislav Petkov void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
108327f6d22bSBorislav Petkov 
10845bee2cc6SKan Liang void intel_pmu_auto_reload_read(struct perf_event *event);
10855bee2cc6SKan Liang 
1086c22497f5SKan Liang void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
1087c22497f5SKan Liang 
108827f6d22bSBorislav Petkov void intel_ds_init(void);
108927f6d22bSBorislav Petkov 
1090421ca868SAlexey Budankov void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
1091421ca868SAlexey Budankov 				 struct perf_event_context *next);
1092421ca868SAlexey Budankov 
109327f6d22bSBorislav Petkov void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
109427f6d22bSBorislav Petkov 
109519fc9dddSDavid Carrillo-Cisneros u64 lbr_from_signext_quirk_wr(u64 val);
109619fc9dddSDavid Carrillo-Cisneros 
109727f6d22bSBorislav Petkov void intel_pmu_lbr_reset(void);
109827f6d22bSBorislav Petkov 
10999f354a72SKan Liang void intel_pmu_lbr_reset_32(void);
11009f354a72SKan Liang 
11019f354a72SKan Liang void intel_pmu_lbr_reset_64(void);
11029f354a72SKan Liang 
110368f7082fSPeter Zijlstra void intel_pmu_lbr_add(struct perf_event *event);
110427f6d22bSBorislav Petkov 
110568f7082fSPeter Zijlstra void intel_pmu_lbr_del(struct perf_event *event);
110627f6d22bSBorislav Petkov 
110727f6d22bSBorislav Petkov void intel_pmu_lbr_enable_all(bool pmi);
110827f6d22bSBorislav Petkov 
110927f6d22bSBorislav Petkov void intel_pmu_lbr_disable_all(void);
111027f6d22bSBorislav Petkov 
111127f6d22bSBorislav Petkov void intel_pmu_lbr_read(void);
111227f6d22bSBorislav Petkov 
1113c301b1d8SKan Liang void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);
1114c301b1d8SKan Liang 
1115c301b1d8SKan Liang void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);
1116c301b1d8SKan Liang 
1117799571bfSKan Liang void intel_pmu_lbr_save(void *ctx);
1118799571bfSKan Liang 
1119799571bfSKan Liang void intel_pmu_lbr_restore(void *ctx);
1120799571bfSKan Liang 
112127f6d22bSBorislav Petkov void intel_pmu_lbr_init_core(void);
112227f6d22bSBorislav Petkov 
112327f6d22bSBorislav Petkov void intel_pmu_lbr_init_nhm(void);
112427f6d22bSBorislav Petkov 
112527f6d22bSBorislav Petkov void intel_pmu_lbr_init_atom(void);
112627f6d22bSBorislav Petkov 
1127f21d5adcSKan Liang void intel_pmu_lbr_init_slm(void);
1128f21d5adcSKan Liang 
112927f6d22bSBorislav Petkov void intel_pmu_lbr_init_snb(void);
113027f6d22bSBorislav Petkov 
113127f6d22bSBorislav Petkov void intel_pmu_lbr_init_hsw(void);
113227f6d22bSBorislav Petkov 
113327f6d22bSBorislav Petkov void intel_pmu_lbr_init_skl(void);
113427f6d22bSBorislav Petkov 
113527f6d22bSBorislav Petkov void intel_pmu_lbr_init_knl(void);
113627f6d22bSBorislav Petkov 
1137e17dc653SAndi Kleen void intel_pmu_pebs_data_source_nhm(void);
1138e17dc653SAndi Kleen 
11396ae5fa61SAndi Kleen void intel_pmu_pebs_data_source_skl(bool pmem);
11406ae5fa61SAndi Kleen 
114127f6d22bSBorislav Petkov int intel_pmu_setup_lbr_filter(struct perf_event *event);
114227f6d22bSBorislav Petkov 
114327f6d22bSBorislav Petkov void intel_pt_interrupt(void);
114427f6d22bSBorislav Petkov 
114527f6d22bSBorislav Petkov int intel_bts_interrupt(void);
114627f6d22bSBorislav Petkov 
114727f6d22bSBorislav Petkov void intel_bts_enable_local(void);
114827f6d22bSBorislav Petkov 
114927f6d22bSBorislav Petkov void intel_bts_disable_local(void);
115027f6d22bSBorislav Petkov 
115127f6d22bSBorislav Petkov int p4_pmu_init(void);
115227f6d22bSBorislav Petkov 
115327f6d22bSBorislav Petkov int p6_pmu_init(void);
115427f6d22bSBorislav Petkov 
115527f6d22bSBorislav Petkov int knc_pmu_init(void);
115627f6d22bSBorislav Petkov 
115727f6d22bSBorislav Petkov static inline int is_ht_workaround_enabled(void)
115827f6d22bSBorislav Petkov {
115927f6d22bSBorislav Petkov 	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
116027f6d22bSBorislav Petkov }
116127f6d22bSBorislav Petkov 
116227f6d22bSBorislav Petkov #else /* CONFIG_CPU_SUP_INTEL */
116327f6d22bSBorislav Petkov 
116427f6d22bSBorislav Petkov static inline void reserve_ds_buffers(void)
116527f6d22bSBorislav Petkov {
116627f6d22bSBorislav Petkov }
116727f6d22bSBorislav Petkov 
116827f6d22bSBorislav Petkov static inline void release_ds_buffers(void)
116927f6d22bSBorislav Petkov {
117027f6d22bSBorislav Petkov }
117127f6d22bSBorislav Petkov 
117227f6d22bSBorislav Petkov static inline int intel_pmu_init(void)
117327f6d22bSBorislav Petkov {
117427f6d22bSBorislav Petkov 	return 0;
117527f6d22bSBorislav Petkov }
117627f6d22bSBorislav Petkov 
1177f764c58bSPeter Zijlstra static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
117827f6d22bSBorislav Petkov {
1179d01b1f96SPeter Zijlstra (Intel) 	return 0;
1180d01b1f96SPeter Zijlstra (Intel) }
1181d01b1f96SPeter Zijlstra (Intel) 
1182f764c58bSPeter Zijlstra static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
1183d01b1f96SPeter Zijlstra (Intel) {
118427f6d22bSBorislav Petkov }
118527f6d22bSBorislav Petkov 
118627f6d22bSBorislav Petkov static inline int is_ht_workaround_enabled(void)
118727f6d22bSBorislav Petkov {
118827f6d22bSBorislav Petkov 	return 0;
118927f6d22bSBorislav Petkov }
119027f6d22bSBorislav Petkov #endif /* CONFIG_CPU_SUP_INTEL */
11913a4ac121SCodyYao-oc 
11923a4ac121SCodyYao-oc #if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
11933a4ac121SCodyYao-oc int zhaoxin_pmu_init(void);
11943a4ac121SCodyYao-oc #else
11953a4ac121SCodyYao-oc static inline int zhaoxin_pmu_init(void)
11963a4ac121SCodyYao-oc {
11973a4ac121SCodyYao-oc 	return 0;
11983a4ac121SCodyYao-oc }
11993a4ac121SCodyYao-oc #endif /*CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN*/
1200