1 /*
2  * Performance events x86 architecture header
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licensing details see kernel-base/COPYING
13  */
14 
15 #include <linux/perf_event.h>
16 
17 #include <asm/fpu/xstate.h>
18 #include <asm/intel_ds.h>
19 #include <asm/cpu.h>
20 
21 /* To enable MSR tracing please use the generic trace points. */
22 
23 /*
24  *          |   NHM/WSM    |      SNB     |
25  * register -------------------------------
26  *          |  HT  | no HT |  HT  | no HT |
27  *-----------------------------------------
28  * offcore  | core | core  | cpu  | core  |
29  * lbr_sel  | core | core  | cpu  | core  |
30  * ld_lat   | cpu  | core  | cpu  | core  |
31  *-----------------------------------------
32  *
33  * Given that there is only a small number of shared regs,
34  * we can pre-allocate their slots in the per-cpu
35  * per-core reg tables.
36  */
37 enum extra_reg_type {
38 	EXTRA_REG_NONE		= -1, /* not used */
39 
40 	EXTRA_REG_RSP_0		= 0,  /* offcore_response_0 */
41 	EXTRA_REG_RSP_1		= 1,  /* offcore_response_1 */
42 	EXTRA_REG_LBR		= 2,  /* lbr_select */
43 	EXTRA_REG_LDLAT		= 3,  /* ld_lat_threshold */
44 	EXTRA_REG_FE		= 4,  /* fe_* */
45 	EXTRA_REG_SNOOP_0	= 5,  /* snoop response 0 */
46 	EXTRA_REG_SNOOP_1	= 6,  /* snoop response 1 */
47 
48 	EXTRA_REG_MAX		      /* number of entries needed */
49 };
50 
51 struct event_constraint {
52 	union {
53 		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
54 		u64		idxmsk64;
55 	};
56 	u64		code;
57 	u64		cmask;
58 	int		weight;
59 	int		overlap;
60 	int		flags;
61 	unsigned int	size;
62 };
63 
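/*
 * A constraint covers a range of event codes: ->code is the first code in
 * the range and ->size is its length (0 for a single code, see
 * __EVENT_CONSTRAINT_RANGE below).  Because the subtraction here is
 * unsigned, any ecode below ->code wraps around to a huge value and fails
 * the comparison, so this single test checks
 * code <= (ecode & cmask) <= code + size.
 */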
64 static inline bool constraint_match(struct event_constraint *c, u64 ecode)
65 {
66 	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
67 }
68 
69 #define PERF_ARCH(name, val)	\
70 	PERF_X86_EVENT_##name = val,
71 
72 /*
73  * struct hw_perf_event.flags flags
74  */
75 enum {
76 #include "perf_event_flags.h"
77 };
78 
79 #undef PERF_ARCH
80 
81 #define PERF_ARCH(name, val)						\
82 	static_assert((PERF_X86_EVENT_##name & PERF_EVENT_FLAG_ARCH) ==	\
83 		      PERF_X86_EVENT_##name);
84 
85 #include "perf_event_flags.h"
86 
87 #undef PERF_ARCH
88 
89 static inline bool is_topdown_count(struct perf_event *event)
90 {
91 	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
92 }
93 
94 static inline bool is_metric_event(struct perf_event *event)
95 {
96 	u64 config = event->attr.config;
97 
98 	return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
99 		((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING)  &&
100 		((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
101 }
102 
103 static inline bool is_slots_event(struct perf_event *event)
104 {
105 	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
106 }
107 
108 static inline bool is_topdown_event(struct perf_event *event)
109 {
110 	return is_metric_event(event) || is_slots_event(event);
111 }
112 
113 struct amd_nb {
114 	int nb_id;  /* NorthBridge id */
115 	int refcnt; /* reference count */
116 	struct perf_event *owners[X86_PMC_IDX_MAX];
117 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
118 };
119 
120 #define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
121 #define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
122 #define PEBS_OUTPUT_OFFSET	61
123 #define PEBS_OUTPUT_MASK	(3ull << PEBS_OUTPUT_OFFSET)
124 #define PEBS_OUTPUT_PT		(1ull << PEBS_OUTPUT_OFFSET)
125 #define PEBS_VIA_PT_MASK	(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)
126 
127 /*
128  * Flags PEBS can handle without a PMI.
129  *
130  * TID can only be handled by flushing at context switch.
131  * REGS_USER can be handled for events limited to ring 3.
132  *
133  */
134 #define LARGE_PEBS_FLAGS \
135 	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
136 	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
137 	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
138 	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
139 	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
140 	PERF_SAMPLE_PERIOD | PERF_SAMPLE_CODE_PAGE_SIZE | \
141 	PERF_SAMPLE_WEIGHT_TYPE)
142 
143 #define PEBS_GP_REGS			\
144 	((1ULL << PERF_REG_X86_AX)    | \
145 	 (1ULL << PERF_REG_X86_BX)    | \
146 	 (1ULL << PERF_REG_X86_CX)    | \
147 	 (1ULL << PERF_REG_X86_DX)    | \
148 	 (1ULL << PERF_REG_X86_DI)    | \
149 	 (1ULL << PERF_REG_X86_SI)    | \
150 	 (1ULL << PERF_REG_X86_SP)    | \
151 	 (1ULL << PERF_REG_X86_BP)    | \
152 	 (1ULL << PERF_REG_X86_IP)    | \
153 	 (1ULL << PERF_REG_X86_FLAGS) | \
154 	 (1ULL << PERF_REG_X86_R8)    | \
155 	 (1ULL << PERF_REG_X86_R9)    | \
156 	 (1ULL << PERF_REG_X86_R10)   | \
157 	 (1ULL << PERF_REG_X86_R11)   | \
158 	 (1ULL << PERF_REG_X86_R12)   | \
159 	 (1ULL << PERF_REG_X86_R13)   | \
160 	 (1ULL << PERF_REG_X86_R14)   | \
161 	 (1ULL << PERF_REG_X86_R15))
162 
163 /*
164  * Per register state.
165  */
166 struct er_account {
167 	raw_spinlock_t      lock;	/* per-core: protect structure */
168 	u64                 config;	/* extra MSR config */
169 	u64                 reg;	/* extra MSR number */
170 	atomic_t            ref;	/* reference count */
171 };
172 
173 /*
174  * Per core/cpu state
175  *
176  * Used to coordinate shared registers between HT threads or
177  * among events on a single PMU.
178  */
179 struct intel_shared_regs {
180 	struct er_account       regs[EXTRA_REG_MAX];
181 	int                     refcnt;		/* per-core: #HT threads */
182 	unsigned                core_id;	/* per-core: core id */
183 };
184 
185 enum intel_excl_state_type {
186 	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
187 	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
188 	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
189 };
190 
191 struct intel_excl_states {
192 	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
193 	bool sched_started; /* true if scheduling has started */
194 };
195 
196 struct intel_excl_cntrs {
197 	raw_spinlock_t	lock;
198 
199 	struct intel_excl_states states[2];
200 
201 	union {
202 		u16	has_exclusive[2];
203 		u32	exclusive_present;
204 	};
205 
206 	int		refcnt;		/* per-core: #HT threads */
207 	unsigned	core_id;	/* per-core: core id */
208 };
209 
210 struct x86_perf_task_context;
211 #define MAX_LBR_ENTRIES		32
212 
213 enum {
214 	LBR_FORMAT_32		= 0x00,
215 	LBR_FORMAT_LIP		= 0x01,
216 	LBR_FORMAT_EIP		= 0x02,
217 	LBR_FORMAT_EIP_FLAGS	= 0x03,
218 	LBR_FORMAT_EIP_FLAGS2	= 0x04,
219 	LBR_FORMAT_INFO		= 0x05,
220 	LBR_FORMAT_TIME		= 0x06,
221 	LBR_FORMAT_INFO2	= 0x07,
222 	LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_INFO2,
223 };
224 
225 enum {
226 	X86_PERF_KFREE_SHARED = 0,
227 	X86_PERF_KFREE_EXCL   = 1,
228 	X86_PERF_KFREE_MAX
229 };
230 
231 struct cpu_hw_events {
232 	/*
233 	 * Generic x86 PMC bits
234 	 */
235 	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
236 	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
237 	unsigned long		dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
238 	int			enabled;
239 
240 	int			n_events; /* the # of events in the below arrays */
241 	int			n_added;  /* the # of most recently added events in the
242 					     below arrays; they've never been enabled yet */
243 	int			n_txn;    /* the # of most recently added events in the
244 					     below arrays; added in the current transaction */
245 	int			n_txn_pair;
246 	int			n_txn_metric;
247 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
248 	u64			tags[X86_PMC_IDX_MAX];
249 
250 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
251 	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];
252 
253 	int			n_excl; /* the number of exclusive events */
254 
255 	unsigned int		txn_flags;
256 	int			is_fake;
257 
258 	/*
259 	 * Intel DebugStore bits
260 	 */
261 	struct debug_store	*ds;
262 	void			*ds_pebs_vaddr;
263 	void			*ds_bts_vaddr;
264 	u64			pebs_enabled;
265 	int			n_pebs;
266 	int			n_large_pebs;
267 	int			n_pebs_via_pt;
268 	int			pebs_output;
269 
270 	/* Current superset of the events' hardware configuration */
271 	u64			pebs_data_cfg;
272 	u64			active_pebs_data_cfg;
273 	int			pebs_record_size;
274 
275 	/* Intel Fixed counter configuration */
276 	u64			fixed_ctrl_val;
277 	u64			active_fixed_ctrl_val;
278 
279 	/*
280 	 * Intel LBR bits
281 	 */
282 	int				lbr_users;
283 	int				lbr_pebs_users;
284 	struct perf_branch_stack	lbr_stack;
285 	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
286 	union {
287 		struct er_account		*lbr_sel;
288 		struct er_account		*lbr_ctl;
289 	};
290 	u64				br_sel;
291 	void				*last_task_ctx;
292 	int				last_log_id;
293 	int				lbr_select;
294 	void				*lbr_xsave;
295 
296 	/*
297 	 * Intel host/guest exclude bits
298 	 */
299 	u64				intel_ctrl_guest_mask;
300 	u64				intel_ctrl_host_mask;
301 	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];
302 
303 	/*
304 	 * Intel checkpoint mask
305 	 */
306 	u64				intel_cp_status;
307 
308 	/*
309 	 * manage shared (per-core, per-cpu) registers
310 	 * used on Intel NHM/WSM/SNB
311 	 */
312 	struct intel_shared_regs	*shared_regs;
313 	/*
314 	 * manage exclusive counter access between hyperthreads
315 	 */
316 	struct event_constraint *constraint_list; /* in enable order */
317 	struct intel_excl_cntrs		*excl_cntrs;
318 	int excl_thread_id; /* 0 or 1 */
319 
320 	/*
321 	 * SKL TSX_FORCE_ABORT shadow
322 	 */
323 	u64				tfa_shadow;
324 
325 	/*
326 	 * Perf Metrics
327 	 */
328 	/* number of accepted metrics events */
329 	int				n_metric;
330 
331 	/*
332 	 * AMD specific bits
333 	 */
334 	struct amd_nb			*amd_nb;
335 	int				brs_active; /* BRS is enabled */
336 
337 	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
338 	u64				perf_ctr_virt_mask;
339 	int				n_pair; /* Large increment events */
340 
341 	void				*kfree_on_online[X86_PERF_KFREE_MAX];
342 
343 	struct pmu			*pmu;
344 };
345 
346 #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
347 	{ .idxmsk64 = (n) },		\
348 	.code = (c),			\
349 	.size = (e) - (c),		\
350 	.cmask = (m),			\
351 	.weight = (w),			\
352 	.overlap = (o),			\
353 	.flags = f,			\
354 }
355 
356 #define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
357 	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
358 
359 #define EVENT_CONSTRAINT(c, n, m)	\
360 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
361 
362 /*
363  * The constraint_match() function only works for 'simple' event codes
364  * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
365  */
366 #define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
367 	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
368 
369 #define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
370 	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
371 			   0, PERF_X86_EVENT_EXCL)
372 
373 /*
374  * The overlap flag marks event constraints with overlapping counter
375  * masks. This is the case if the counter mask of such an event is not
376  * a subset of any other counter mask of a constraint with an equal or
377  * higher weight, e.g.:
378  *
379  *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
380  *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
381  *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
382  *
383  * The event scheduler may not select the correct counter in the first
384  * cycle because it needs to know which subsequent events will be
385  * scheduled. It may fail to schedule the events then. So we set the
386  * overlap flag for such constraints to give the scheduler a hint which
387  * events to select for counter rescheduling.
388  *
389  * Care must be taken as the rescheduling algorithm is O(n!), which
390  * will increase scheduling cycles for an over-committed system
391  * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
392  * and their counter masks must be kept to a minimum.
393  */
394 #define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
395 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
396 
397 /*
398  * Constraint on the Event code.
399  */
400 #define INTEL_EVENT_CONSTRAINT(c, n)	\
401 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
402 
403 /*
404  * Constraint on a range of Event codes
405  */
406 #define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
407 	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
408 
409 /*
410  * Constraint on the Event code + UMask + fixed-mask
411  *
412  * filter mask to validate fixed counter events.
413  * the following filters disqualify for fixed counters:
414  *  - inv
415  *  - edge
416  *  - cnt-mask
417  *  - in_tx
418  *  - in_tx_checkpointed
419  *  The other filters are supported by fixed counters.
420  *  The any-thread option is supported starting with v3.
421  */
422 #define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
423 #define FIXED_EVENT_CONSTRAINT(c, n)	\
424 	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
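
/*
 * For example (illustrative; the real tables live in the per-model .c files):
 * FIXED_EVENT_CONSTRAINT(0x00c0, 0) pins the architectural INST_RETIRED.ANY
 * event (event 0xc0, umask 0x00) to fixed counter 0, i.e. bit 32 + 0 in the
 * counter index mask.
 */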
425 
426 /*
427  * The special metric counters do not actually exist. They are calculated from
428  * the combination of FxCtr3 and MSR_PERF_METRICS.
429  *
430  * The special metric counters are mapped to a dummy offset for the scheduler.
431  * Sharing the same metric between multiple users is not allowed without
432  * multiplexing, even though the hardware supports that in principle.
433  */
434 
435 #define METRIC_EVENT_CONSTRAINT(c, n)					\
436 	EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)),	\
437 			 INTEL_ARCH_EVENT_MASK)
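
/*
 * For example (illustrative): METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0)
 * maps the retiring metric event to the first pseudo counter index
 * (INTEL_PMC_IDX_METRIC_BASE + 0); these indices never touch real counters.
 */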
438 
439 /*
440  * Constraint on the Event code + UMask
441  */
442 #define INTEL_UEVENT_CONSTRAINT(c, n)	\
443 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
444 
445 /* Constraint on specific umask bit only + event */
446 #define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
447 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))
448 
449 /* Like UEVENT_CONSTRAINT, but match flags too */
450 #define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
451 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
452 
453 #define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
454 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
455 			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)
456 
457 #define INTEL_PLD_CONSTRAINT(c, n)	\
458 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
459 			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
460 
461 #define INTEL_PSD_CONSTRAINT(c, n)	\
462 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
463 			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_STLAT)
464 
465 #define INTEL_PST_CONSTRAINT(c, n)	\
466 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
467 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
468 
469 #define INTEL_HYBRID_LAT_CONSTRAINT(c, n)	\
470 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
471 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID)
472 
473 /* Event constraint, but match on all event flags too. */
474 #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
475 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
476 
477 #define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
478 	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
479 
480 /* Check only flags, but allow all event/umask */
481 #define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
482 	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
483 
484 /* Check flags and event code, and set the HSW store flag */
485 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
486 	__EVENT_CONSTRAINT(code, n, 			\
487 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
488 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
489 
490 /* Check flags and event code, and set the HSW load flag */
491 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
492 	__EVENT_CONSTRAINT(code, n,			\
493 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
494 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
495 
496 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
497 	__EVENT_CONSTRAINT_RANGE(code, end, n,				\
498 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
499 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
500 
501 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
502 	__EVENT_CONSTRAINT(code, n,			\
503 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
504 			  HWEIGHT(n), 0, \
505 			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
506 
507 /* Check flags and event code/umask, and set the HSW store flag */
508 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
509 	__EVENT_CONSTRAINT(code, n, 			\
510 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
511 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
512 
513 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
514 	__EVENT_CONSTRAINT(code, n,			\
515 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
516 			  HWEIGHT(n), 0, \
517 			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)
518 
519 /* Check flags and event code/umask, and set the HSW load flag */
520 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
521 	__EVENT_CONSTRAINT(code, n, 			\
522 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
523 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
524 
525 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
526 	__EVENT_CONSTRAINT(code, n,			\
527 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
528 			  HWEIGHT(n), 0, \
529 			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
530 
531 /* Check flags and event code/umask, and set the HSW N/A flag */
532 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
533 	__EVENT_CONSTRAINT(code, n, 			\
534 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
535 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
536 
537 
538 /*
539  * We define the end marker as having a weight of -1
540  * so that events can be blacklisted with a counter bitmask
541  * of zero, and thus a weight of zero.
542  * The end marker's weight of -1 can never be obtained by
543  * counting the bits in a bitmask.
544  */
545 #define EVENT_CONSTRAINT_END { .weight = -1 }
546 
547 /*
548  * Check for end marker with weight == -1
549  */
550 #define for_each_event_constraint(e, c)	\
551 	for ((e) = (c); (e)->weight != -1; (e)++)
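
/*
 * Typical usage (sketch only; the real tables and names live in the
 * per-model .c files):
 *
 *	static struct event_constraint intel_foo_event_constraints[] = {
 *		FIXED_EVENT_CONSTRAINT(0x00c0, 0),
 *		INTEL_EVENT_CONSTRAINT(0x48, 0x4),
 *		EVENT_CONSTRAINT_END
 *	};
 *
 *	struct event_constraint *c;
 *	for_each_event_constraint(c, intel_foo_event_constraints)
 *		...;
 */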
552 
553 /*
554  * Extra registers for specific events.
555  *
556  * Some events need large masks and require external MSRs.
557  * Those extra MSRs end up being shared by all events on
558  * a PMU and sometimes between the PMUs of sibling HT threads.
559  * In either case, the kernel needs to handle conflicting
560  * accesses to those extra, shared, regs. The data structure
561  * to manage those registers is stored in cpu_hw_event.
562  */
563 struct extra_reg {
564 	unsigned int		event;
565 	unsigned int		msr;
566 	u64			config_mask;
567 	u64			valid_mask;
568 	int			idx;  /* per_xxx->regs[] reg index */
569 	bool			extra_msr_access;
570 };
571 
572 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
573 	.event = (e),			\
574 	.msr = (ms),			\
575 	.config_mask = (m),		\
576 	.valid_mask = (vm),		\
577 	.idx = EXTRA_REG_##i,		\
578 	.extra_msr_access = true,	\
579 	}
580 
581 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
582 	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
583 
584 #define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
585 	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
586 			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)
587 
588 #define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
589 	INTEL_UEVENT_EXTRA_REG(c, \
590 			       MSR_PEBS_LD_LAT_THRESHOLD, \
591 			       0xffff, \
592 			       LDLAT)
593 
594 #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
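
/*
 * Typical usage (sketch only; real tables such as intel_snb_extra_regs live
 * in intel/core.c, and valid_mask values are model specific):
 *
 *	static struct extra_reg intel_foo_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
 *		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 *		EVENT_EXTRA_END
 *	};
 */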
595 
596 union perf_capabilities {
597 	struct {
598 		u64	lbr_format:6;
599 		u64	pebs_trap:1;
600 		u64	pebs_arch_reg:1;
601 		u64	pebs_format:4;
602 		u64	smm_freeze:1;
603 		/*
604 		 * PMU supports a separate counter range for writing
605 		 * values > 32 bits.
606 		 */
607 		u64	full_width_write:1;
608 		u64     pebs_baseline:1;
609 		u64	perf_metrics:1;
610 		u64	pebs_output_pt_available:1;
611 		u64	pebs_timing_info:1;
612 		u64	anythread_deprecated:1;
613 	};
614 	u64	capabilities;
615 };
616 
617 struct x86_pmu_quirk {
618 	struct x86_pmu_quirk *next;
619 	void (*func)(void);
620 };
621 
622 union x86_pmu_config {
623 	struct {
624 		u64 event:8,
625 		    umask:8,
626 		    usr:1,
627 		    os:1,
628 		    edge:1,
629 		    pc:1,
630 		    interrupt:1,
631 		    __reserved1:1,
632 		    en:1,
633 		    inv:1,
634 		    cmask:8,
635 		    event2:4,
636 		    __reserved2:4,
637 		    go:1,
638 		    ho:1;
639 	} bits;
640 	u64 value;
641 };
642 
643 #define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
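
/*
 * Builds a raw event config from named bit fields, e.g. (illustrative):
 *
 *	event->hw.config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
 *
 * which encodes event 0xc0, umask 0x01 with the invert bit and a counter
 * mask of 16 into a single u64 EVENTSEL value.
 */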
644 
645 enum {
646 	x86_lbr_exclusive_lbr,
647 	x86_lbr_exclusive_bts,
648 	x86_lbr_exclusive_pt,
649 	x86_lbr_exclusive_max,
650 };
651 
652 #define PERF_PEBS_DATA_SOURCE_MAX	0x10
653 #define PERF_PEBS_DATA_SOURCE_MASK	(PERF_PEBS_DATA_SOURCE_MAX - 1)
654 
655 enum hybrid_cpu_type {
656 	HYBRID_INTEL_NONE,
657 	HYBRID_INTEL_ATOM	= 0x20,
658 	HYBRID_INTEL_CORE	= 0x40,
659 };
660 
661 enum hybrid_pmu_type {
662 	not_hybrid,
663 	hybrid_small		= BIT(0),
664 	hybrid_big		= BIT(1),
665 
666 	hybrid_big_small	= hybrid_big | hybrid_small, /* only used for matching */
667 };
668 
669 #define X86_HYBRID_PMU_ATOM_IDX		0
670 #define X86_HYBRID_PMU_CORE_IDX		1
671 
672 #define X86_HYBRID_NUM_PMUS		2
673 
674 struct x86_hybrid_pmu {
675 	struct pmu			pmu;
676 	const char			*name;
677 	enum hybrid_pmu_type		pmu_type;
678 	cpumask_t			supported_cpus;
679 	union perf_capabilities		intel_cap;
680 	u64				intel_ctrl;
681 	int				max_pebs_events;
682 	int				num_counters;
683 	int				num_counters_fixed;
684 	struct event_constraint		unconstrained;
685 
686 	u64				hw_cache_event_ids
687 					[PERF_COUNT_HW_CACHE_MAX]
688 					[PERF_COUNT_HW_CACHE_OP_MAX]
689 					[PERF_COUNT_HW_CACHE_RESULT_MAX];
690 	u64				hw_cache_extra_regs
691 					[PERF_COUNT_HW_CACHE_MAX]
692 					[PERF_COUNT_HW_CACHE_OP_MAX]
693 					[PERF_COUNT_HW_CACHE_RESULT_MAX];
694 	struct event_constraint		*event_constraints;
695 	struct event_constraint		*pebs_constraints;
696 	struct extra_reg		*extra_regs;
697 
698 	unsigned int			late_ack	:1,
699 					mid_ack		:1,
700 					enabled_ack	:1;
701 
702 	u64				pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX];
703 };
704 
705 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
706 {
707 	return container_of(pmu, struct x86_hybrid_pmu, pmu);
708 }
709 
710 extern struct static_key_false perf_is_hybrid;
711 #define is_hybrid()		static_branch_unlikely(&perf_is_hybrid)
712 
713 #define hybrid(_pmu, _field)				\
714 (*({							\
715 	typeof(&x86_pmu._field) __Fp = &x86_pmu._field;	\
716 							\
717 	if (is_hybrid() && (_pmu))			\
718 		__Fp = &hybrid_pmu(_pmu)->_field;	\
719 							\
720 	__Fp;						\
721 }))
722 
723 #define hybrid_var(_pmu, _var)				\
724 (*({							\
725 	typeof(&_var) __Fp = &_var;			\
726 							\
727 	if (is_hybrid() && (_pmu))			\
728 		__Fp = &hybrid_pmu(_pmu)->_var;		\
729 							\
730 	__Fp;						\
731 }))
732 
733 #define hybrid_bit(_pmu, _field)			\
734 ({							\
735 	bool __Fp = x86_pmu._field;			\
736 							\
737 	if (is_hybrid() && (_pmu))			\
738 		__Fp = hybrid_pmu(_pmu)->_field;	\
739 							\
740 	__Fp;						\
741 })
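
/*
 * The hybrid*() helpers resolve a field either from the global x86_pmu or,
 * on hybrid parts, from the per-PMU x86_hybrid_pmu copy.  For example
 * (illustrative):
 *
 *	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 *
 * reads x86_pmu.intel_ctrl on non-hybrid systems and
 * hybrid_pmu(cpuc->pmu)->intel_ctrl on hybrid ones.
 */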
742 
743 /*
744  * struct x86_pmu - generic x86 pmu
745  */
746 struct x86_pmu {
747 	/*
748 	 * Generic x86 PMC bits
749 	 */
750 	const char	*name;
751 	int		version;
752 	int		(*handle_irq)(struct pt_regs *);
753 	void		(*disable_all)(void);
754 	void		(*enable_all)(int added);
755 	void		(*enable)(struct perf_event *);
756 	void		(*disable)(struct perf_event *);
757 	void		(*assign)(struct perf_event *event, int idx);
758 	void		(*add)(struct perf_event *);
759 	void		(*del)(struct perf_event *);
760 	void		(*read)(struct perf_event *event);
761 	int		(*set_period)(struct perf_event *event);
762 	u64		(*update)(struct perf_event *event);
763 	int		(*hw_config)(struct perf_event *event);
764 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
765 	unsigned	eventsel;
766 	unsigned	perfctr;
767 	int		(*addr_offset)(int index, bool eventsel);
768 	int		(*rdpmc_index)(int index);
769 	u64		(*event_map)(int);
770 	int		max_events;
771 	int		num_counters;
772 	int		num_counters_fixed;
773 	int		cntval_bits;
774 	u64		cntval_mask;
775 	union {
776 			unsigned long events_maskl;
777 			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
778 	};
779 	int		events_mask_len;
780 	int		apic;
781 	u64		max_period;
782 	struct event_constraint *
783 			(*get_event_constraints)(struct cpu_hw_events *cpuc,
784 						 int idx,
785 						 struct perf_event *event);
786 
787 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
788 						 struct perf_event *event);
789 
790 	void		(*start_scheduling)(struct cpu_hw_events *cpuc);
791 
792 	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
793 
794 	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);
795 
796 	struct event_constraint *event_constraints;
797 	struct x86_pmu_quirk *quirks;
798 	void		(*limit_period)(struct perf_event *event, s64 *l);
799 
800 	/* PMI handler bits */
801 	unsigned int	late_ack		:1,
802 			mid_ack			:1,
803 			enabled_ack		:1;
804 	/*
805 	 * sysfs attrs
806 	 */
807 	int		attr_rdpmc_broken;
808 	int		attr_rdpmc;
809 	struct attribute **format_attrs;
810 
811 	ssize_t		(*events_sysfs_show)(char *page, u64 config);
812 	const struct attribute_group **attr_update;
813 
814 	unsigned long	attr_freeze_on_smi;
815 
816 	/*
817 	 * CPU Hotplug hooks
818 	 */
819 	int		(*cpu_prepare)(int cpu);
820 	void		(*cpu_starting)(int cpu);
821 	void		(*cpu_dying)(int cpu);
822 	void		(*cpu_dead)(int cpu);
823 
824 	void		(*check_microcode)(void);
825 	void		(*sched_task)(struct perf_event_pmu_context *pmu_ctx,
826 				      bool sched_in);
827 
828 	/*
829 	 * Intel Arch Perfmon v2+
830 	 */
831 	u64			intel_ctrl;
832 	union perf_capabilities intel_cap;
833 
834 	/*
835 	 * Intel DebugStore bits
836 	 */
837 	unsigned int	bts			:1,
838 			bts_active		:1,
839 			pebs			:1,
840 			pebs_active		:1,
841 			pebs_broken		:1,
842 			pebs_prec_dist		:1,
843 			pebs_no_tlb		:1,
844 			pebs_no_isolation	:1,
845 			pebs_block		:1,
846 			pebs_ept		:1;
847 	int		pebs_record_size;
848 	int		pebs_buffer_size;
849 	int		max_pebs_events;
850 	void		(*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
851 	struct event_constraint *pebs_constraints;
852 	void		(*pebs_aliases)(struct perf_event *event);
853 	u64		(*pebs_latency_data)(struct perf_event *event, u64 status);
854 	unsigned long	large_pebs_flags;
855 	u64		rtm_abort_event;
856 	u64		pebs_capable;
857 
858 	/*
859 	 * Intel LBR
860 	 */
861 	unsigned int	lbr_tos, lbr_from, lbr_to,
862 			lbr_info, lbr_nr;	   /* LBR base regs and size */
863 	union {
864 		u64	lbr_sel_mask;		   /* LBR_SELECT valid bits */
865 		u64	lbr_ctl_mask;		   /* LBR_CTL valid bits */
866 	};
867 	union {
868 		const int	*lbr_sel_map;	   /* lbr_select mappings */
869 		int		*lbr_ctl_map;	   /* LBR_CTL mappings */
870 	};
871 	bool		lbr_double_abort;	   /* duplicated lbr aborts */
872 	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */
873 
874 	unsigned int	lbr_has_info:1;
875 	unsigned int	lbr_has_tsx:1;
876 	unsigned int	lbr_from_flags:1;
877 	unsigned int	lbr_to_cycles:1;
878 
879 	/*
880 	 * Intel Architectural LBR CPUID Enumeration
881 	 */
882 	unsigned int	lbr_depth_mask:8;
883 	unsigned int	lbr_deep_c_reset:1;
884 	unsigned int	lbr_lip:1;
885 	unsigned int	lbr_cpl:1;
886 	unsigned int	lbr_filter:1;
887 	unsigned int	lbr_call_stack:1;
888 	unsigned int	lbr_mispred:1;
889 	unsigned int	lbr_timed_lbr:1;
890 	unsigned int	lbr_br_type:1;
891 
892 	void		(*lbr_reset)(void);
893 	void		(*lbr_read)(struct cpu_hw_events *cpuc);
894 	void		(*lbr_save)(void *ctx);
895 	void		(*lbr_restore)(void *ctx);
896 
897 	/*
898 	 * Intel PT/LBR/BTS are exclusive
899 	 */
900 	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];
901 
902 	/*
903 	 * Intel perf metrics
904 	 */
905 	int		num_topdown_events;
906 
907 	/*
908 	 * perf task context (i.e. struct perf_event_pmu_context::task_ctx_data)
909 	 * switch helper to bridge calls from perf/core to perf/x86.
910 	 * See struct pmu::swap_task_ctx() usage for examples.
911 	 */
912 	void		(*swap_task_ctx)(struct perf_event_pmu_context *prev_epc,
913 					 struct perf_event_pmu_context *next_epc);
914 
915 	/*
916 	 * AMD bits
917 	 */
918 	unsigned int	amd_nb_constraints : 1;
919 	u64		perf_ctr_pair_en;
920 
921 	/*
922 	 * Extra registers for events
923 	 */
924 	struct extra_reg *extra_regs;
925 	unsigned int flags;
926 
927 	/*
928 	 * Intel host/guest support (KVM)
929 	 */
930 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr, void *data);
931 
932 	/*
933 	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
934 	 */
935 	int (*check_period) (struct perf_event *event, u64 period);
936 
937 	int (*aux_output_match) (struct perf_event *event);
938 
939 	void (*filter)(struct pmu *pmu, int cpu, bool *ret);
940 	/*
941 	 * Hybrid support
942 	 *
943 	 * Most PMU capabilities are the same among different hybrid PMUs.
944 	 * The global x86_pmu saves the architecture capabilities, which
945 	 * are available for all PMUs. The hybrid_pmu only includes the
946 	 * unique capabilities.
947 	 */
948 	int				num_hybrid_pmus;
949 	struct x86_hybrid_pmu		*hybrid_pmu;
950 	enum hybrid_cpu_type (*get_hybrid_cpu_type)	(void);
951 };
952 
953 struct x86_perf_task_context_opt {
954 	int lbr_callstack_users;
955 	int lbr_stack_state;
956 	int log_id;
957 };
958 
959 struct x86_perf_task_context {
960 	u64 lbr_sel;
961 	int tos;
962 	int valid_lbrs;
963 	struct x86_perf_task_context_opt opt;
964 	struct lbr_entry lbr[MAX_LBR_ENTRIES];
965 };
966 
967 struct x86_perf_task_context_arch_lbr {
968 	struct x86_perf_task_context_opt opt;
969 	struct lbr_entry entries[];
970 };
971 
972 /*
973  * Add padding to guarantee the 64-byte alignment of the state buffer.
974  *
975  * The structure is dynamically allocated. The size of the LBR state may vary
976  * based on the number of LBR registers.
977  *
978  * Do not put anything after the LBR state.
979  */
980 struct x86_perf_task_context_arch_lbr_xsave {
981 	struct x86_perf_task_context_opt		opt;
982 
983 	union {
984 		struct xregs_state			xsave;
985 		struct {
986 			struct fxregs_state		i387;
987 			struct xstate_header		header;
988 			struct arch_lbr_state		lbr;
989 		} __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
990 	};
991 };
992 
993 #define x86_add_quirk(func_)						\
994 do {									\
995 	static struct x86_pmu_quirk __quirk __initdata = {		\
996 		.func = func_,						\
997 	};								\
998 	__quirk.next = x86_pmu.quirks;					\
999 	x86_pmu.quirks = &__quirk;					\
1000 } while (0)
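
/*
 * Used at init time to chain model specific fixups, e.g. (illustrative):
 *
 *	x86_add_quirk(intel_sandybridge_quirk);
 *
 * The queued quirk functions are run once from the PMU init code.
 */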
1001 
1002 /*
1003  * x86_pmu flags
1004  */
1005 #define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
1006 #define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
1007 #define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
1008 #define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
1009 #define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
1010 #define PMU_FL_TFA		0x20 /* deal with TSX force abort */
1011 #define PMU_FL_PAIR		0x40 /* merge counters for large incr. events */
1012 #define PMU_FL_INSTR_LATENCY	0x80 /* Support Instruction Latency in PEBS Memory Info Record */
1013 #define PMU_FL_MEM_LOADS_AUX	0x100 /* Require an auxiliary event for the complete memory info */
1014 #define PMU_FL_RETIRE_LATENCY	0x200 /* Support Retire Latency in PEBS */
1015 
1016 #define EVENT_VAR(_id)  event_attr_##_id
1017 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
1018 
1019 #define EVENT_ATTR(_name, _id)						\
1020 static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
1021 	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
1022 	.id		= PERF_COUNT_HW_##_id,				\
1023 	.event_str	= NULL,						\
1024 };
1025 
1026 #define EVENT_ATTR_STR(_name, v, str)					\
1027 static struct perf_pmu_events_attr event_attr_##v = {			\
1028 	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
1029 	.id		= 0,						\
1030 	.event_str	= str,						\
1031 };
1032 
1033 #define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
1034 static struct perf_pmu_events_ht_attr event_attr_##v = {		\
1035 	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
1036 	.id		= 0,						\
1037 	.event_str_noht	= noht,						\
1038 	.event_str_ht	= ht,						\
1039 }
1040 
1041 #define EVENT_ATTR_STR_HYBRID(_name, v, str, _pmu)			\
1042 static struct perf_pmu_events_hybrid_attr event_attr_##v = {		\
1043 	.attr		= __ATTR(_name, 0444, events_hybrid_sysfs_show, NULL),\
1044 	.id		= 0,						\
1045 	.event_str	= str,						\
1046 	.pmu_type	= _pmu,						\
1047 }
1048 
1049 #define FORMAT_HYBRID_PTR(_id) (&format_attr_hybrid_##_id.attr.attr)
1050 
1051 #define FORMAT_ATTR_HYBRID(_name, _pmu)					\
1052 static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\
1053 	.attr		= __ATTR_RO(_name),				\
1054 	.pmu_type	= _pmu,						\
1055 }
1056 
1057 struct pmu *x86_get_pmu(unsigned int cpu);
1058 extern struct x86_pmu x86_pmu __read_mostly;
1059 
1060 DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
1061 DECLARE_STATIC_CALL(x86_pmu_update,     *x86_pmu.update);
1062 
1063 static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
1064 {
1065 	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
1066 		return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;
1067 
1068 	return &((struct x86_perf_task_context *)ctx)->opt;
1069 }
1070 
1071 static inline bool x86_pmu_has_lbr_callstack(void)
1072 {
1073 	return  x86_pmu.lbr_sel_map &&
1074 		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
1075 }
1076 
1077 DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
1078 DECLARE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1079 
1080 int x86_perf_event_set_period(struct perf_event *event);
1081 
1082 /*
1083  * Generalized hw caching related hw_event table, filled
1084  * in on a per-model basis. A value of 0 means
1085  * 'not supported', -1 means 'hw_event makes no sense on
1086  * this CPU', and any other value is the raw hw_event
1087  * ID.
1088  */
1089 
1090 #define C(x) PERF_COUNT_HW_CACHE_##x
1091 
1092 extern u64 __read_mostly hw_cache_event_ids
1093 				[PERF_COUNT_HW_CACHE_MAX]
1094 				[PERF_COUNT_HW_CACHE_OP_MAX]
1095 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
1096 extern u64 __read_mostly hw_cache_extra_regs
1097 				[PERF_COUNT_HW_CACHE_MAX]
1098 				[PERF_COUNT_HW_CACHE_OP_MAX]
1099 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
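
/*
 * The C() macro above shortens the indexing, e.g. (illustrative):
 *
 *	hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)]
 *
 * yields the raw event id used for L1 data cache read misses on the
 * current model, or 0 / -1 as described above.
 */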
1100 
1101 u64 x86_perf_event_update(struct perf_event *event);
1102 
1103 static inline unsigned int x86_pmu_config_addr(int index)
1104 {
1105 	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
1106 				   x86_pmu.addr_offset(index, true) : index);
1107 }
1108 
1109 static inline unsigned int x86_pmu_event_addr(int index)
1110 {
1111 	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
1112 				  x86_pmu.addr_offset(index, false) : index);
1113 }
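
/*
 * ->addr_offset() lets a PMU with a non-contiguous MSR layout override the
 * default "base + index" addressing used by the two helpers above; e.g. on
 * AMD core PMUs (assumption based on the AMD implementation) the control
 * and counter MSRs are interleaved, so the offset is 2 * index rather than
 * index.
 */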
1114 
1115 static inline int x86_pmu_rdpmc_index(int index)
1116 {
1117 	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
1118 }
1119 
1120 bool check_hw_exists(struct pmu *pmu, int num_counters,
1121 		     int num_counters_fixed);
1122 
1123 int x86_add_exclusive(unsigned int what);
1124 
1125 void x86_del_exclusive(unsigned int what);
1126 
1127 int x86_reserve_hardware(void);
1128 
1129 void x86_release_hardware(void);
1130 
1131 int x86_pmu_max_precise(void);
1132 
1133 void hw_perf_lbr_event_destroy(struct perf_event *event);
1134 
1135 int x86_setup_perfctr(struct perf_event *event);
1136 
1137 int x86_pmu_hw_config(struct perf_event *event);
1138 
1139 void x86_pmu_disable_all(void);
1140 
1141 static inline bool has_amd_brs(struct hw_perf_event *hwc)
1142 {
1143 	return hwc->flags & PERF_X86_EVENT_AMD_BRS;
1144 }
1145 
1146 static inline bool is_counter_pair(struct hw_perf_event *hwc)
1147 {
1148 	return hwc->flags & PERF_X86_EVENT_PAIR;
1149 }
1150 
1151 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
1152 					  u64 enable_mask)
1153 {
1154 	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
1155 
1156 	if (hwc->extra_reg.reg)
1157 		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
1158 
1159 	/*
1160 	 * Add the enabled Merge event on the next counter
1161 	 * if a large increment event is being enabled on this counter
1162 	 */
1163 	if (is_counter_pair(hwc))
1164 		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
1165 
1166 	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
1167 }
1168 
1169 void x86_pmu_enable_all(int added);
1170 
1171 int perf_assign_events(struct event_constraint **constraints, int n,
1172 			int wmin, int wmax, int gpmax, int *assign);
1173 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
1174 
1175 void x86_pmu_stop(struct perf_event *event, int flags);
1176 
1177 static inline void x86_pmu_disable_event(struct perf_event *event)
1178 {
1179 	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
1180 	struct hw_perf_event *hwc = &event->hw;
1181 
1182 	wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
1183 
1184 	if (is_counter_pair(hwc))
1185 		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
1186 }
1187 
1188 void x86_pmu_enable_event(struct perf_event *event);
1189 
1190 int x86_pmu_handle_irq(struct pt_regs *regs);
1191 
1192 void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
1193 			  u64 intel_ctrl);
1194 
1195 extern struct event_constraint emptyconstraint;
1196 
1197 extern struct event_constraint unconstrained;
1198 
1199 static inline bool kernel_ip(unsigned long ip)
1200 {
1201 #ifdef CONFIG_X86_32
1202 	return ip > PAGE_OFFSET;
1203 #else
1204 	return (long)ip < 0;
1205 #endif
1206 }
1207 
1208 /*
1209  * Not all PMUs provide the right context information to place the reported IP
1210  * into full context. Specifically, segment registers are typically not
1211  * supplied.
1212  *
1213  * Assuming the address is a linear address (it is for IBS), we fake the CS and
1214  * vm86 mode using the known zero-based code segment and 'fix up' the registers
1215  * to reflect this.
1216  *
1217  * Intel PEBS/LBR appear to typically provide the effective address; there is
1218  * not much we can do about that but pray and treat it like a linear address.
1219  */
1220 static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
1221 {
1222 	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
1223 	if (regs->flags & X86_VM_MASK)
1224 		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
1225 	regs->ip = ip;
1226 }
1227 
1228 /*
1229  * x86 control flow change classification
1230  * x86 control flow changes include branches, interrupts, traps and faults
1231  */
1232 enum {
1233 	X86_BR_NONE		= 0,      /* unknown */
1234 
1235 	X86_BR_USER		= 1 << 0, /* branch target is user */
1236 	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */
1237 
1238 	X86_BR_CALL		= 1 << 2, /* call */
1239 	X86_BR_RET		= 1 << 3, /* return */
1240 	X86_BR_SYSCALL		= 1 << 4, /* syscall */
1241 	X86_BR_SYSRET		= 1 << 5, /* syscall return */
1242 	X86_BR_INT		= 1 << 6, /* sw interrupt */
1243 	X86_BR_IRET		= 1 << 7, /* return from interrupt */
1244 	X86_BR_JCC		= 1 << 8, /* conditional */
1245 	X86_BR_JMP		= 1 << 9, /* jump */
1246 	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
1247 	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
1248 	X86_BR_ABORT		= 1 << 12,/* transaction abort */
1249 	X86_BR_IN_TX		= 1 << 13,/* in transaction */
1250 	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
1251 	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
1252 	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
1253 	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
1254 
1255 	X86_BR_TYPE_SAVE	= 1 << 18,/* indicate to save branch type */
1256 
1257 };
1258 
1259 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
1260 #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
1261 
1262 #define X86_BR_ANY       \
1263 	(X86_BR_CALL    |\
1264 	 X86_BR_RET     |\
1265 	 X86_BR_SYSCALL |\
1266 	 X86_BR_SYSRET  |\
1267 	 X86_BR_INT     |\
1268 	 X86_BR_IRET    |\
1269 	 X86_BR_JCC     |\
1270 	 X86_BR_JMP	 |\
1271 	 X86_BR_IRQ	 |\
1272 	 X86_BR_ABORT	 |\
1273 	 X86_BR_IND_CALL |\
1274 	 X86_BR_IND_JMP  |\
1275 	 X86_BR_ZERO_CALL)
1276 
1277 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
1278 
1279 #define X86_BR_ANY_CALL		 \
1280 	(X86_BR_CALL		|\
1281 	 X86_BR_IND_CALL	|\
1282 	 X86_BR_ZERO_CALL	|\
1283 	 X86_BR_SYSCALL		|\
1284 	 X86_BR_IRQ		|\
1285 	 X86_BR_INT)
1286 
1287 int common_branch_type(int type);
1288 int branch_type(unsigned long from, unsigned long to, int abort);
1289 int branch_type_fused(unsigned long from, unsigned long to, int abort,
1290 		      int *offset);
1291 
1292 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
1293 ssize_t intel_event_sysfs_show(char *page, u64 config);
1294 
1295 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
1296 			  char *page);
1297 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
1298 			  char *page);
1299 ssize_t events_hybrid_sysfs_show(struct device *dev,
1300 				 struct device_attribute *attr,
1301 				 char *page);
1302 
1303 static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
1304 {
1305 	u64 intel_ctrl = hybrid(pmu, intel_ctrl);
1306 
1307 	return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
1308 }
1309 
1310 #ifdef CONFIG_CPU_SUP_AMD
1311 
1312 int amd_pmu_init(void);
1313 
1314 int amd_pmu_lbr_init(void);
1315 void amd_pmu_lbr_reset(void);
1316 void amd_pmu_lbr_read(void);
1317 void amd_pmu_lbr_add(struct perf_event *event);
1318 void amd_pmu_lbr_del(struct perf_event *event);
1319 void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
1320 void amd_pmu_lbr_enable_all(void);
1321 void amd_pmu_lbr_disable_all(void);
1322 int amd_pmu_lbr_hw_config(struct perf_event *event);
1323 
1324 #ifdef CONFIG_PERF_EVENTS_AMD_BRS
1325 
1326 #define AMD_FAM19H_BRS_EVENT 0xc4 /* RETIRED_TAKEN_BRANCH_INSTRUCTIONS */
1327 
1328 int amd_brs_init(void);
1329 void amd_brs_disable(void);
1330 void amd_brs_enable(void);
1331 void amd_brs_enable_all(void);
1332 void amd_brs_disable_all(void);
1333 void amd_brs_drain(void);
1334 void amd_brs_lopwr_init(void);
1335 int amd_brs_hw_config(struct perf_event *event);
1336 void amd_brs_reset(void);
1337 
1338 static inline void amd_pmu_brs_add(struct perf_event *event)
1339 {
1340 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1341 
1342 	perf_sched_cb_inc(event->pmu);
1343 	cpuc->lbr_users++;
1344 	/*
1345 	 * No need to reset BRS because it is reset
1346 	 * on brs_enable() and it is saturating
1347 	 */
1348 }
1349 
1350 static inline void amd_pmu_brs_del(struct perf_event *event)
1351 {
1352 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1353 
1354 	cpuc->lbr_users--;
1355 	WARN_ON_ONCE(cpuc->lbr_users < 0);
1356 
1357 	perf_sched_cb_dec(event->pmu);
1358 }
1359 
1360 void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
1361 #else
1362 static inline int amd_brs_init(void)
1363 {
1364 	return 0;
1365 }
1366 static inline void amd_brs_disable(void) {}
1367 static inline void amd_brs_enable(void) {}
1368 static inline void amd_brs_drain(void) {}
1369 static inline void amd_brs_lopwr_init(void) {}
1370 static inline void amd_brs_disable_all(void) {}
1371 static inline int amd_brs_hw_config(struct perf_event *event)
1372 {
1373 	return 0;
1374 }
1375 static inline void amd_brs_reset(void) {}
1376 
1377 static inline void amd_pmu_brs_add(struct perf_event *event)
1378 {
1379 }
1380 
1381 static inline void amd_pmu_brs_del(struct perf_event *event)
1382 {
1383 }
1384 
1385 static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
1386 {
1387 }
1388 
1389 static inline void amd_brs_enable_all(void)
1390 {
1391 }
1392 
1393 #endif
1394 
1395 #else /* CONFIG_CPU_SUP_AMD */
1396 
1397 static inline int amd_pmu_init(void)
1398 {
1399 	return 0;
1400 }
1401 
1402 static inline int amd_brs_init(void)
1403 {
1404 	return -EOPNOTSUPP;
1405 }
1406 
1407 static inline void amd_brs_drain(void)
1408 {
1409 }
1410 
1411 static inline void amd_brs_enable_all(void)
1412 {
1413 }
1414 
1415 static inline void amd_brs_disable_all(void)
1416 {
1417 }
1418 #endif /* CONFIG_CPU_SUP_AMD */
1419 
1420 static inline int is_pebs_pt(struct perf_event *event)
1421 {
1422 	return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
1423 }
1424 
1425 #ifdef CONFIG_CPU_SUP_INTEL
1426 
1427 static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
1428 {
1429 	struct hw_perf_event *hwc = &event->hw;
1430 	unsigned int hw_event, bts_event;
1431 
1432 	if (event->attr.freq)
1433 		return false;
1434 
1435 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1436 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1437 
1438 	return hw_event == bts_event && period == 1;
1439 }
1440 
1441 static inline bool intel_pmu_has_bts(struct perf_event *event)
1442 {
1443 	struct hw_perf_event *hwc = &event->hw;
1444 
1445 	return intel_pmu_has_bts_period(event, hwc->sample_period);
1446 }
1447 
1448 static __always_inline void __intel_pmu_pebs_disable_all(void)
1449 {
1450 	wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1451 }
1452 
1453 static __always_inline void __intel_pmu_arch_lbr_disable(void)
1454 {
1455 	wrmsrl(MSR_ARCH_LBR_CTL, 0);
1456 }
1457 
1458 static __always_inline void __intel_pmu_lbr_disable(void)
1459 {
1460 	u64 debugctl;
1461 
1462 	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
1463 	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
1464 	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
1465 }
1466 
1467 int intel_pmu_save_and_restart(struct perf_event *event);
1468 
1469 struct event_constraint *
1470 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
1471 			  struct perf_event *event);
1472 
1473 extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
1474 extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
1475 
1476 int intel_pmu_init(void);
1477 
1478 void init_debug_store_on_cpu(int cpu);
1479 
1480 void fini_debug_store_on_cpu(int cpu);
1481 
1482 void release_ds_buffers(void);
1483 
1484 void reserve_ds_buffers(void);
1485 
1486 void release_lbr_buffers(void);
1487 
1488 void reserve_lbr_buffers(void);
1489 
1490 extern struct event_constraint bts_constraint;
1491 extern struct event_constraint vlbr_constraint;
1492 
1493 void intel_pmu_enable_bts(u64 config);
1494 
1495 void intel_pmu_disable_bts(void);
1496 
1497 int intel_pmu_drain_bts_buffer(void);
1498 
1499 u64 adl_latency_data_small(struct perf_event *event, u64 status);
1500 
1501 u64 mtl_latency_data_small(struct perf_event *event, u64 status);
1502 
1503 extern struct event_constraint intel_core2_pebs_event_constraints[];
1504 
1505 extern struct event_constraint intel_atom_pebs_event_constraints[];
1506 
1507 extern struct event_constraint intel_slm_pebs_event_constraints[];
1508 
1509 extern struct event_constraint intel_glm_pebs_event_constraints[];
1510 
1511 extern struct event_constraint intel_glp_pebs_event_constraints[];
1512 
1513 extern struct event_constraint intel_grt_pebs_event_constraints[];
1514 
1515 extern struct event_constraint intel_nehalem_pebs_event_constraints[];
1516 
1517 extern struct event_constraint intel_westmere_pebs_event_constraints[];
1518 
1519 extern struct event_constraint intel_snb_pebs_event_constraints[];
1520 
1521 extern struct event_constraint intel_ivb_pebs_event_constraints[];
1522 
1523 extern struct event_constraint intel_hsw_pebs_event_constraints[];
1524 
1525 extern struct event_constraint intel_bdw_pebs_event_constraints[];
1526 
1527 extern struct event_constraint intel_skl_pebs_event_constraints[];
1528 
1529 extern struct event_constraint intel_icl_pebs_event_constraints[];
1530 
1531 extern struct event_constraint intel_glc_pebs_event_constraints[];
1532 
1533 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
1534 
1535 void intel_pmu_pebs_add(struct perf_event *event);
1536 
1537 void intel_pmu_pebs_del(struct perf_event *event);
1538 
1539 void intel_pmu_pebs_enable(struct perf_event *event);
1540 
1541 void intel_pmu_pebs_disable(struct perf_event *event);
1542 
1543 void intel_pmu_pebs_enable_all(void);
1544 
1545 void intel_pmu_pebs_disable_all(void);
1546 
1547 void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
1548 
1549 void intel_pmu_auto_reload_read(struct perf_event *event);
1550 
1551 void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
1552 
1553 void intel_ds_init(void);
1554 
1555 void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
1556 				 struct perf_event_pmu_context *next_epc);
1557 
1558 void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
1559 
1560 u64 lbr_from_signext_quirk_wr(u64 val);
1561 
1562 void intel_pmu_lbr_reset(void);
1563 
1564 void intel_pmu_lbr_reset_32(void);
1565 
1566 void intel_pmu_lbr_reset_64(void);
1567 
1568 void intel_pmu_lbr_add(struct perf_event *event);
1569 
1570 void intel_pmu_lbr_del(struct perf_event *event);
1571 
1572 void intel_pmu_lbr_enable_all(bool pmi);
1573 
1574 void intel_pmu_lbr_disable_all(void);
1575 
1576 void intel_pmu_lbr_read(void);
1577 
1578 void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);
1579 
1580 void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);
1581 
1582 void intel_pmu_lbr_save(void *ctx);
1583 
1584 void intel_pmu_lbr_restore(void *ctx);
1585 
1586 void intel_pmu_lbr_init_core(void);
1587 
1588 void intel_pmu_lbr_init_nhm(void);
1589 
1590 void intel_pmu_lbr_init_atom(void);
1591 
1592 void intel_pmu_lbr_init_slm(void);
1593 
1594 void intel_pmu_lbr_init_snb(void);
1595 
1596 void intel_pmu_lbr_init_hsw(void);
1597 
1598 void intel_pmu_lbr_init_skl(void);
1599 
1600 void intel_pmu_lbr_init_knl(void);
1601 
1602 void intel_pmu_lbr_init(void);
1603 
1604 void intel_pmu_arch_lbr_init(void);
1605 
1606 void intel_pmu_pebs_data_source_nhm(void);
1607 
1608 void intel_pmu_pebs_data_source_skl(bool pmem);
1609 
1610 void intel_pmu_pebs_data_source_adl(void);
1611 
1612 void intel_pmu_pebs_data_source_grt(void);
1613 
1614 void intel_pmu_pebs_data_source_mtl(void);
1615 
1616 void intel_pmu_pebs_data_source_cmt(void);
1617 
1618 int intel_pmu_setup_lbr_filter(struct perf_event *event);
1619 
1620 void intel_pt_interrupt(void);
1621 
1622 int intel_bts_interrupt(void);
1623 
1624 void intel_bts_enable_local(void);
1625 
1626 void intel_bts_disable_local(void);
1627 
1628 int p4_pmu_init(void);
1629 
1630 int p6_pmu_init(void);
1631 
1632 int knc_pmu_init(void);
1633 
1634 static inline int is_ht_workaround_enabled(void)
1635 {
1636 	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
1637 }
1638 
1639 #else /* CONFIG_CPU_SUP_INTEL */
1640 
1641 static inline void reserve_ds_buffers(void)
1642 {
1643 }
1644 
1645 static inline void release_ds_buffers(void)
1646 {
1647 }
1648 
1649 static inline void release_lbr_buffers(void)
1650 {
1651 }
1652 
1653 static inline void reserve_lbr_buffers(void)
1654 {
1655 }
1656 
1657 static inline int intel_pmu_init(void)
1658 {
1659 	return 0;
1660 }
1661 
1662 static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
1663 {
1664 	return 0;
1665 }
1666 
1667 static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
1668 {
1669 }
1670 
1671 static inline int is_ht_workaround_enabled(void)
1672 {
1673 	return 0;
1674 }
1675 #endif /* CONFIG_CPU_SUP_INTEL */
1676 
1677 #if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
1678 int zhaoxin_pmu_init(void);
1679 #else
1680 static inline int zhaoxin_pmu_init(void)
1681 {
1682 	return 0;
1683 }
1684 #endif /* CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN */
1685