/*
 * Performance events x86 architecture header
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,	/* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING	0x0800 /* use freerunning PEBS */


struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8
#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 *
 */
#define PEBS_FREERUNNING_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
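
/*
 * Illustration (a sketch, not part of this header): an event can use
 * freerunning PEBS only when its requested sample_type is a subset of
 * the flags above, i.e. the subset test below holds.  The helper name
 * is hypothetical; the real check lives with the PEBS setup code:
 *
 *	static bool pebs_can_freerun(struct perf_event *event)
 *	{
 *		return !(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS);
 *	}
 */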

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

#define PEBS_REGS \
	(PERF_REG_X86_AX | \
	 PERF_REG_X86_BX | \
	 PERF_REG_X86_CX | \
	 PERF_REG_X86_DX | \
	 PERF_REG_X86_DI | \
	 PERF_REG_X86_SI | \
	 PERF_REG_X86_SP | \
	 PERF_REG_X86_BP | \
	 PERF_REG_X86_IP | \
	 PERF_REG_X86_FLAGS | \
	 PERF_REG_X86_R8 | \
	 PERF_REG_X86_R9 | \
	 PERF_REG_X86_R10 | \
	 PERF_REG_X86_R11 | \
	 PERF_REG_X86_R12 | \
	 PERF_REG_X86_R13 | \
	 PERF_REG_X86_R14 | \
	 PERF_REG_X86_R15)

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t      lock;	/* per-core: protect structure */
	u64                 config;	/* extra MSR config */
	u64                 reg;	/* extra MSR number */
	atomic_t            ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account       regs[EXTRA_REG_MAX];
	int                     refcnt;		/* per-core: #HT threads */
	unsigned                core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
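
/*
 * Example (illustrative values, not taken from a real model table):
 * constrain a hypothetical event 0x3c to counters 0 and 1 only.  The
 * counter bitmask 0x3 becomes idxmsk64 and the weight is
 * HWEIGHT(0x3) == 2:
 *
 *	INTEL_EVENT_CONSTRAINT(0x3c, 0x3)
 */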

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
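
/*
 * Example: pin INST_RETIRED.ANY (event 0xc0, umask 0) to fixed counter 0.
 * The constraint sets index-mask bit 32 + 0, i.e. the first counter above
 * the general-purpose range:
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 */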

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
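
/*
 * Typical use (a sketch; "constraints" stands for any table built from
 * the macros above and terminated with EVENT_CONSTRAINT_END):
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */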

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
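
/*
 * Example (a sketch; the table name is made up and the valid mask is
 * model specific): route the OFFCORE_RESPONSE_0 event (0x01b7) to its
 * companion MSR and to slot RSP_0 of the shared reg table:
 *
 *	static struct extra_reg intel_example_extra_regs[] __initconst = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *		EVENT_EXTRA_END
 *	};
 */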

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
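
/*
 * Example: build a raw config with designated initializers; unnamed
 * fields default to zero.  This encoding (event 0xc0, umask 0x01,
 * inverted cmask of 16) is the kind of alias the PEBS code programs:
 *
 *	u64 config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
 */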

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	unsigned	(*limit_period)(struct perf_event *event, unsigned l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct attribute **caps_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	unsigned long	attr_freeze_on_smi;
	struct attribute **attrs;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1,
			pebs_prec_dist	:1,
			pebs_no_tlb	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;
	unsigned long	free_running_flags;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
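
/*
 * Usage sketch (the function name is made up): queue an __init fixup to
 * run once the PMU has been probed.  Quirks are prepended to
 * x86_pmu.quirks and walked once at init time:
 *
 *	static __init void example_counter_mask_quirk(void)
 *	{
 *		x86_pmu.num_counters = min(x86_pmu.num_counters, 4);
 *	}
 *
 *	x86_add_quirk(example_counter_mask_quirk);
 */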

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
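
/*
 * Example: without an addr_offset callback the counter MSRs are
 * contiguous, so on a stock architectural Intel PMU (eventsel ==
 * MSR_ARCH_PERFMON_EVENTSEL0, perfctr == MSR_ARCH_PERFMON_PERFCTR0):
 *
 *	x86_pmu_config_addr(2) == MSR_ARCH_PERFMON_EVENTSEL0 + 2
 *	x86_pmu_event_addr(2)  == MSR_ARCH_PERFMON_PERFCTR0 + 2
 *
 * PMUs with interleaved MSR layouts supply addr_offset to remap the index.
 */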

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}
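
/*
 * The common enable path passes the architectural enable bit:
 *
 *	__x86_pmu_enable_event(&event->hw, ARCH_PERFMON_EVENTSEL_ENABLE);
 *
 * perf_ctr_virt_mask then lets virtualization support keep selected
 * control bits masked off while running as a guest.
 */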

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}
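
/*
 * In other words, a BTS event is the generic branch-instructions event
 * programmed with a fixed period of 1.  A user-space sketch of such an
 * attribute:
 *
 *	struct perf_event_attr attr = {
 *		.type          = PERF_TYPE_HARDWARE,
 *		.config        = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
 *		.sample_period = 1,
 *	};
 */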

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */