xref: /linux/arch/x86/events/perf_event.h (revision e5cf0260a7472b4f34a46c418c14bec272aac404)
1 /*
2  * Performance events x86 architecture header
3  *
4  *  Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licensing details see kernel-base/COPYING
13  */
14 
15 #include <linux/perf_event.h>
16 
17 #include <asm/fpu/xstate.h>
18 #include <asm/intel_ds.h>
19 #include <asm/cpu.h>
20 #include <asm/msr.h>
21 
22 /* To enable MSR tracing please use the generic trace points. */
23 
24 /*
25  *          |   NHM/WSM    |      SNB     |
26  * register -------------------------------
27  *          |  HT  | no HT |  HT  | no HT |
28  *-----------------------------------------
29  * offcore  | core | core  | cpu  | core  |
30  * lbr_sel  | core | core  | cpu  | core  |
31  * ld_lat   | cpu  | core  | cpu  | core  |
32  *-----------------------------------------
33  *
34  * Given that there is a small number of shared regs,
35  * we can pre-allocate their slot in the per-cpu
36  * per-core reg tables.
37  */
38 enum extra_reg_type {
39 	EXTRA_REG_NONE		= -1, /* not used */
40 
41 	EXTRA_REG_RSP_0		= 0,  /* offcore_response_0 */
42 	EXTRA_REG_RSP_1		= 1,  /* offcore_response_1 */
43 	EXTRA_REG_LBR		= 2,  /* lbr_select */
44 	EXTRA_REG_LDLAT		= 3,  /* ld_lat_threshold */
45 	EXTRA_REG_FE		= 4,  /* fe_* */
46 	EXTRA_REG_SNOOP_0	= 5,  /* snoop response 0 */
47 	EXTRA_REG_SNOOP_1	= 6,  /* snoop response 1 */
48 	EXTRA_REG_OMR_0		= 7,  /* OMR 0 */
49 	EXTRA_REG_OMR_1		= 8,  /* OMR 1 */
50 	EXTRA_REG_OMR_2		= 9,  /* OMR 2 */
51 	EXTRA_REG_OMR_3		= 10,  /* OMR 3 */
52 
53 	EXTRA_REG_MAX		      /* number of entries needed */
54 };
55 
56 struct event_constraint {
57 	union {
58 		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
59 		u64		idxmsk64;
60 	};
61 	u64		code;
62 	u64		cmask;
63 	int		weight;
64 	int		overlap;
65 	int		flags;
66 	unsigned int	size;
67 };
68 
69 static inline bool constraint_match(struct event_constraint *c, u64 ecode)
70 {
71 	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
72 }
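/*
 * Illustrative note (not in the original source): because the comparison
 * above is unsigned, a range constraint built with .code = 0xc0 and
 * .size = 4 matches event codes 0xc0..0xc4, e.g. (0xc2 & cmask) - 0xc0 == 2,
 * which is <= 4, while any smaller code wraps around to a huge unsigned
 * value and fails.  A plain EVENT_CONSTRAINT() has size == 0, so only an
 * exact code match succeeds.
 */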
73 
74 #define PERF_ARCH(name, val)	\
75 	PERF_X86_EVENT_##name = val,
76 
77 /*
78  * struct hw_perf_event.flags flags
79  */
80 enum {
81 #include "perf_event_flags.h"
82 };
83 
84 #undef PERF_ARCH
85 
86 #define PERF_ARCH(name, val)						\
87 	static_assert((PERF_X86_EVENT_##name & PERF_EVENT_FLAG_ARCH) ==	\
88 		      PERF_X86_EVENT_##name);
89 
90 #include "perf_event_flags.h"
91 
92 #undef PERF_ARCH
93 
94 static inline bool is_topdown_count(struct perf_event *event)
95 {
96 	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
97 }
98 
99 static inline bool is_metric_event(struct perf_event *event)
100 {
101 	u64 config = event->attr.config;
102 
103 	return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
104 		((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING)  &&
105 		((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
106 }
107 
108 static inline bool is_slots_event(struct perf_event *event)
109 {
110 	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
111 }
112 
113 static inline bool is_topdown_event(struct perf_event *event)
114 {
115 	return is_metric_event(event) || is_slots_event(event);
116 }
117 
118 int is_x86_event(struct perf_event *event);
119 
120 static inline bool check_leader_group(struct perf_event *leader, int flags)
121 {
122 	return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
123 }
124 
125 static inline bool is_branch_counters_group(struct perf_event *event)
126 {
127 	return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
128 }
129 
130 static inline bool is_pebs_counter_event_group(struct perf_event *event)
131 {
132 	return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);
133 }
134 
135 static inline bool is_acr_event_group(struct perf_event *event)
136 {
137 	return check_leader_group(event->group_leader, PERF_X86_EVENT_ACR);
138 }
139 
140 static inline bool is_acr_self_reload_event(struct perf_event *event)
141 {
142 	struct hw_perf_event *hwc = &event->hw;
143 
144 	if (hwc->idx < 0)
145 		return false;
146 
147 	return test_bit(hwc->idx, (unsigned long *)&hwc->config1);
148 }
149 
150 struct amd_nb {
151 	int nb_id;  /* NorthBridge id */
152 	int refcnt; /* reference count */
153 	struct perf_event *owners[X86_PMC_IDX_MAX];
154 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
155 };
156 
157 #define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
158 #define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
159 #define PEBS_OUTPUT_OFFSET	61
160 #define PEBS_OUTPUT_MASK	(3ull << PEBS_OUTPUT_OFFSET)
161 #define PEBS_OUTPUT_PT		(1ull << PEBS_OUTPUT_OFFSET)
162 #define PEBS_VIA_PT_MASK	(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)
163 
164 /*
165  * Flags PEBS can handle without a PMI.
166  *
167  * TID can only be handled by flushing at context switch.
168  * REGS_USER can be handled for events limited to ring 3.
169  *
170  */
171 #define LARGE_PEBS_FLAGS \
172 	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
173 	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
174 	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
175 	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
176 	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
177 	PERF_SAMPLE_PERIOD | PERF_SAMPLE_CODE_PAGE_SIZE | \
178 	PERF_SAMPLE_WEIGHT_TYPE)
179 
180 #define PEBS_GP_REGS			\
181 	((1ULL << PERF_REG_X86_AX)    | \
182 	 (1ULL << PERF_REG_X86_BX)    | \
183 	 (1ULL << PERF_REG_X86_CX)    | \
184 	 (1ULL << PERF_REG_X86_DX)    | \
185 	 (1ULL << PERF_REG_X86_DI)    | \
186 	 (1ULL << PERF_REG_X86_SI)    | \
187 	 (1ULL << PERF_REG_X86_SP)    | \
188 	 (1ULL << PERF_REG_X86_BP)    | \
189 	 (1ULL << PERF_REG_X86_IP)    | \
190 	 (1ULL << PERF_REG_X86_FLAGS) | \
191 	 (1ULL << PERF_REG_X86_R8)    | \
192 	 (1ULL << PERF_REG_X86_R9)    | \
193 	 (1ULL << PERF_REG_X86_R10)   | \
194 	 (1ULL << PERF_REG_X86_R11)   | \
195 	 (1ULL << PERF_REG_X86_R12)   | \
196 	 (1ULL << PERF_REG_X86_R13)   | \
197 	 (1ULL << PERF_REG_X86_R14)   | \
198 	 (1ULL << PERF_REG_X86_R15))
199 
200 /* user space rdpmc control values */
201 enum {
202 	X86_USER_RDPMC_NEVER_ENABLE		= 0,
203 	X86_USER_RDPMC_CONDITIONAL_ENABLE	= 1,
204 	X86_USER_RDPMC_ALWAYS_ENABLE		= 2,
205 };
206 
207 /*
208  * Per register state.
209  */
210 struct er_account {
211 	raw_spinlock_t      lock;	/* per-core: protect structure */
212 	u64                 config;	/* extra MSR config */
213 	u64                 reg;	/* extra MSR number */
214 	atomic_t            ref;	/* reference count */
215 };
216 
217 /*
218  * Per core/cpu state
219  *
220  * Used to coordinate shared registers between HT threads or
221  * among events on a single PMU.
222  */
223 struct intel_shared_regs {
224 	struct er_account       regs[EXTRA_REG_MAX];
225 	int                     refcnt;		/* per-core: #HT threads */
226 	unsigned                core_id;	/* per-core: core id */
227 };
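/*
 * Usage sketch (illustrative, not lifted from this file): the extra_reg_type
 * values double as indices into the pre-allocated regs[] slots, so the
 * constraint code can claim the per-core account for e.g. the first offcore
 * response MSR roughly like:
 *
 *	struct er_account *era = &cpuc->shared_regs->regs[EXTRA_REG_RSP_0];
 *
 *	raw_spin_lock_irqsave(&era->lock, flags);
 *	// compare/record era->config against the event's extra reg value
 *	raw_spin_unlock_irqrestore(&era->lock, flags);
 */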
228 
229 enum intel_excl_state_type {
230 	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
231 	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
232 	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
233 };
234 
235 struct intel_excl_states {
236 	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
237 	bool sched_started; /* true if scheduling has started */
238 };
239 
240 struct intel_excl_cntrs {
241 	raw_spinlock_t	lock;
242 
243 	struct intel_excl_states states[2];
244 
245 	union {
246 		u16	has_exclusive[2];
247 		u32	exclusive_present;
248 	};
249 
250 	int		refcnt;		/* per-core: #HT threads */
251 	unsigned	core_id;	/* per-core: core id */
252 };
253 
254 struct x86_perf_task_context;
255 #define MAX_LBR_ENTRIES		32
256 
257 enum {
258 	LBR_FORMAT_32		= 0x00,
259 	LBR_FORMAT_LIP		= 0x01,
260 	LBR_FORMAT_EIP		= 0x02,
261 	LBR_FORMAT_EIP_FLAGS	= 0x03,
262 	LBR_FORMAT_EIP_FLAGS2	= 0x04,
263 	LBR_FORMAT_INFO		= 0x05,
264 	LBR_FORMAT_TIME		= 0x06,
265 	LBR_FORMAT_INFO2	= 0x07,
266 	LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_INFO2,
267 };
268 
269 enum {
270 	X86_PERF_KFREE_SHARED = 0,
271 	X86_PERF_KFREE_EXCL   = 1,
272 	X86_PERF_KFREE_MAX
273 };
274 
275 struct cpu_hw_events {
276 	/*
277 	 * Generic x86 PMC bits
278 	 */
279 	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
280 	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
281 	unsigned long		dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
282 	int			enabled;
283 
284 	int			n_events; /* the # of events in the below arrays */
285 	int			n_added;  /* the # of events last added to the below arrays;
286 					     they have never been enabled yet */
287 	int			n_txn;    /* the # of events last added to the below arrays
288 					     in the current transaction */
289 	int			n_txn_pair;
290 	int			n_txn_metric;
291 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
292 	u64			tags[X86_PMC_IDX_MAX];
293 
294 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
295 	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];
296 
297 	int			n_excl; /* the number of exclusive events */
298 	int			n_late_setup; /* the # of events that need late setup */
299 
300 	unsigned int		txn_flags;
301 	int			is_fake;
302 
303 	/*
304 	 * Intel DebugStore bits
305 	 */
306 	struct debug_store	*ds;
307 	void			*ds_bts_vaddr;
308 	/* DS based PEBS or arch-PEBS buffer address */
309 	void			*pebs_vaddr;
310 	u64			pebs_enabled;
311 	int			n_pebs;
312 	int			n_large_pebs;
313 	int			n_pebs_via_pt;
314 	int			pebs_output;
315 
316 	/* Current superset of the events' hardware configuration */
317 	u64			pebs_data_cfg;
318 	u64			active_pebs_data_cfg;
319 	int			pebs_record_size;
320 
321 	/* Intel Fixed counter configuration */
322 	u64			fixed_ctrl_val;
323 	u64			active_fixed_ctrl_val;
324 
325 	/* Intel ACR configuration */
326 	u64			acr_cfg_b[X86_PMC_IDX_MAX];
327 	u64			acr_cfg_c[X86_PMC_IDX_MAX];
328 	/* Cached CFG_C values */
329 	u64			cfg_c_val[X86_PMC_IDX_MAX];
330 
331 	/*
332 	 * Intel LBR bits
333 	 */
334 	int				lbr_users;
335 	int				lbr_pebs_users;
336 	struct perf_branch_stack	lbr_stack;
337 	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
338 	u64				lbr_counters[MAX_LBR_ENTRIES]; /* branch stack extra */
339 	union {
340 		struct er_account		*lbr_sel;
341 		struct er_account		*lbr_ctl;
342 	};
343 	u64				br_sel;
344 	void				*last_task_ctx;
345 	int				last_log_id;
346 	int				lbr_select;
347 	void				*lbr_xsave;
348 
349 	/*
350 	 * Intel host/guest exclude bits
351 	 */
352 	u64				intel_ctrl_guest_mask;
353 	u64				intel_ctrl_host_mask;
354 	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];
355 
356 	/*
357 	 * Intel checkpoint mask
358 	 */
359 	u64				intel_cp_status;
360 
361 	/*
362 	 * manage shared (per-core, per-cpu) registers
363 	 * used on Intel NHM/WSM/SNB
364 	 */
365 	struct intel_shared_regs	*shared_regs;
366 	/*
367 	 * manage exclusive counter access between hyperthreads
368 	 */
369 	struct event_constraint *constraint_list; /* in enable order */
370 	struct intel_excl_cntrs		*excl_cntrs;
371 	int excl_thread_id; /* 0 or 1 */
372 
373 	/*
374 	 * SKL TSX_FORCE_ABORT shadow
375 	 */
376 	u64				tfa_shadow;
377 
378 	/*
379 	 * Perf Metrics
380 	 */
381 	/* number of accepted metrics events */
382 	int				n_metric;
383 
384 	/*
385 	 * AMD specific bits
386 	 */
387 	struct amd_nb			*amd_nb;
388 	int				brs_active; /* BRS is enabled */
389 
390 	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
391 	u64				perf_ctr_virt_mask;
392 	int				n_pair; /* Large increment events */
393 
394 	void				*kfree_on_online[X86_PERF_KFREE_MAX];
395 
396 	struct pmu			*pmu;
397 };
398 
399 #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
400 	{ .idxmsk64 = (n) },		\
401 	.code = (c),			\
402 	.size = (e) - (c),		\
403 	.cmask = (m),			\
404 	.weight = (w),			\
405 	.overlap = (o),			\
406 	.flags = f,			\
407 }
408 
409 #define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
410 	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
411 
412 #define EVENT_CONSTRAINT(c, n, m)	\
413 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
414 
415 /*
416  * The constraint_match() function only works for 'simple' event codes
417  * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
418  */
419 #define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
420 	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
421 
422 #define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
423 	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
424 			   0, PERF_X86_EVENT_EXCL)
425 
426 /*
427  * The overlap flag marks event constraints with overlapping counter
428  * masks. This is the case if the counter mask of such an event is not
429  * a subset of any other counter mask of a constraint with an equal or
430  * higher weight, e.g.:
431  *
432  *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
433  *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
434  *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
435  *
436  * The event scheduler may not select the correct counter in the first
437  * cycle because it needs to know which subsequent events will be
438  * scheduled. It may fail to schedule the events then. So we set the
439  * overlap flag for such constraints to give the scheduler a hint which
440  * events to select for counter rescheduling.
441  *
442  * Care must be taken as the rescheduling algorithm is O(n!) which
443  * will increase scheduling cycles for an over-committed system
444  * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
445  * and its counter masks must be kept at a minimum.
446  */
447 #define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
448 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
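/*
 * For the example above, in binary: 0x09 is 0b001001 (counters 0 and 3),
 * 0x07 is 0b000111 (counters 0-2) and 0x38 is 0b111000 (counters 3-5).
 * 0x09 intersects both of the other masks but is a subset of neither,
 * which is exactly the situation the overlap flag is meant to mark.
 */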
449 
450 /*
451  * Constraint on the Event code.
452  */
453 #define INTEL_EVENT_CONSTRAINT(c, n)	\
454 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
455 
456 /*
457  * Constraint on a range of Event codes
458  */
459 #define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
460 	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
461 
462 /*
463  * Constraint on the Event code + UMask + fixed-mask
464  *
465  * filter mask to validate fixed counter events.
466  * the following filters disqualify for fixed counters:
467  *  - inv
468  *  - edge
469  *  - cnt-mask
470  *  - in_tx
471  *  - in_tx_checkpointed
472  *  The other filters are supported by fixed counters.
473  *  The any-thread option is supported starting with v3.
474  */
475 #define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
476 #define FIXED_EVENT_CONSTRAINT(c, n)	\
477 	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
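/*
 * Example (illustrative): FIXED_EVENT_CONSTRAINT(0x00c0, 0) ties the
 * architectural INST_RETIRED.ANY encoding to fixed counter 0, i.e. bit
 * 32 + 0 of the index mask (INTEL_PMC_IDX_FIXED is 32).  Because the cmask
 * includes FIXED_EVENT_FLAGS, an event that also sets inv, edge, a count
 * mask or the in_tx bits no longer matches and falls back to the generic
 * counters.
 */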
478 
479 /*
480  * The special metric counters do not actually exist. They are calculated from
481  * the combination of FxCtr3 and MSR_PERF_METRICS.
482  *
483  * The special metric counters are mapped to a dummy offset for the scheduler.
484  * The sharing between multiple users of the same metric without multiplexing
485  * is not allowed, even though the hardware supports that in principle.
486  */
487 
488 #define METRIC_EVENT_CONSTRAINT(c, n)					\
489 	EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)),	\
490 			 INTEL_ARCH_EVENT_MASK)
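/*
 * Example (illustrative): METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0)
 * pins the "retiring" pseudo event to the first dummy metric slot, i.e. bit
 * INTEL_PMC_IDX_METRIC_BASE + 0 in the scheduler's counter index mask.
 */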
491 
492 /*
493  * Constraint on the Event code + UMask
494  */
495 #define INTEL_UEVENT_CONSTRAINT(c, n)	\
496 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
497 
498 /* Constraint on specific umask bit only + event */
499 #define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
500 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))
501 
502 /* Like UEVENT_CONSTRAINT, but match flags too */
503 #define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
504 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
505 
506 #define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
507 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
508 			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)
509 
510 #define INTEL_PLD_CONSTRAINT(c, n)	\
511 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
512 			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
513 
514 #define INTEL_PSD_CONSTRAINT(c, n)	\
515 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
516 			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_STLAT)
517 
518 #define INTEL_PST_CONSTRAINT(c, n)	\
519 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
520 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
521 
522 #define INTEL_HYBRID_LAT_CONSTRAINT(c, n)	\
523 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
524 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID)
525 
526 #define INTEL_HYBRID_LDLAT_CONSTRAINT(c, n)	\
527 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
528 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID|PERF_X86_EVENT_PEBS_LD_HSW)
529 
530 #define INTEL_HYBRID_STLAT_CONSTRAINT(c, n)	\
531 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
532 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID|PERF_X86_EVENT_PEBS_ST_HSW)
533 
534 /* Event constraint, but match on all event flags too. */
535 #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
536 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
537 
538 #define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
539 	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
540 
541 /* Check only flags, but allow all event/umask */
542 #define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
543 	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
544 
545 /* Check flags and event code, and set the HSW store flag */
546 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
547 	__EVENT_CONSTRAINT(code, n, 			\
548 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
549 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
550 
551 /* Check flags and event code, and set the HSW load flag */
552 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
553 	__EVENT_CONSTRAINT(code, n,			\
554 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
555 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
556 
557 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
558 	__EVENT_CONSTRAINT_RANGE(code, end, n,				\
559 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
560 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
561 
562 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
563 	__EVENT_CONSTRAINT(code, n,			\
564 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
565 			  HWEIGHT(n), 0, \
566 			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
567 
568 /* Check flags and event code/umask, and set the HSW store flag */
569 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
570 	__EVENT_CONSTRAINT(code, n, 			\
571 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
572 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
573 
574 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
575 	__EVENT_CONSTRAINT(code, n,			\
576 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
577 			  HWEIGHT(n), 0, \
578 			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)
579 
580 /* Check flags and event code/umask, and set the HSW load flag */
581 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
582 	__EVENT_CONSTRAINT(code, n, 			\
583 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
584 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
585 
586 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
587 	__EVENT_CONSTRAINT(code, n,			\
588 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
589 			  HWEIGHT(n), 0, \
590 			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
591 
592 /* Check flags and event code/umask, and set the HSW N/A flag */
593 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
594 	__EVENT_CONSTRAINT(code, n, 			\
595 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
596 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
597 
598 
599 /*
600  * We define the end marker as having a weight of -1
601  * to enable blacklisting of events using a counter bitmask
602  * of zero and thus a weight of zero.
603  * The end marker has a weight that cannot possibly be
604  * obtained from counting the bits in the bitmask.
605  */
606 #define EVENT_CONSTRAINT_END { .weight = -1 }
607 
608 /*
609  * Check for end marker with weight == -1
610  */
611 #define for_each_event_constraint(e, c)	\
612 	for ((e) = (c); (e)->weight != -1; (e)++)
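/*
 * Typical use (sketch, not from this file): model specific tables are
 * terminated with EVENT_CONSTRAINT_END so the iterator can stop on the
 * impossible weight of -1:
 *
 *	static struct event_constraint my_constraints[] = {
 *		FIXED_EVENT_CONSTRAINT(0x00c0, 0),
 *		INTEL_EVENT_CONSTRAINT(0x48, 0x4),
 *		EVENT_CONSTRAINT_END
 *	};
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, my_constraints)
 *		pr_debug("weight %d\n", c->weight);
 */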
613 
614 /*
615  * Extra registers for specific events.
616  *
617  * Some events need large masks and require external MSRs.
618  * Those extra MSRs end up being shared for all events on
619  * a PMU and sometimes between PMU of sibling HT threads.
620  * In either case, the kernel needs to handle conflicting
621  * accesses to those extra, shared, regs. The data structure
622  * to manage those registers is stored in cpu_hw_event.
623  */
624 struct extra_reg {
625 	unsigned int		event;
626 	unsigned int		msr;
627 	u64			config_mask;
628 	u64			valid_mask;
629 	int			idx;  /* per_xxx->regs[] reg index */
630 	bool			extra_msr_access;
631 };
632 
633 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
634 	.event = (e),			\
635 	.msr = (ms),			\
636 	.config_mask = (m),		\
637 	.valid_mask = (vm),		\
638 	.idx = EXTRA_REG_##i,		\
639 	.extra_msr_access = true,	\
640 	}
641 
642 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
643 	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
644 
645 #define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
646 	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
647 			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)
648 
649 #define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
650 	INTEL_UEVENT_EXTRA_REG(c, \
651 			       MSR_PEBS_LD_LAT_THRESHOLD, \
652 			       0xffff, \
653 			       LDLAT)
654 
655 #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
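/*
 * Sketch of a table (illustrative; the valid_mask is model specific): each
 * entry pairs an event encoding with the MSR that backs it and the regs[]
 * slot used for conflict tracking, terminated by EVENT_EXTRA_END:
 *
 *	static struct extra_reg my_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffull, RSP_0),
 *		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 *		EVENT_EXTRA_END
 *	};
 */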
656 
657 union perf_capabilities {
658 	struct {
659 		u64	lbr_format:6;
660 		u64	pebs_trap:1;
661 		u64	pebs_arch_reg:1;
662 		u64	pebs_format:4;
663 		u64	smm_freeze:1;
664 		/*
665 		 * PMU supports separate counter range for writing
666 		 * values > 32bit.
667 		 */
668 		u64	full_width_write:1;
669 		u64     pebs_baseline:1;
670 		u64	perf_metrics:1;
671 		u64	pebs_output_pt_available:1;
672 		u64	pebs_timing_info:1;
673 		u64	anythread_deprecated:1;
674 		u64	rdpmc_metrics_clear:1;
675 	};
676 	u64	capabilities;
677 };
678 
679 struct x86_pmu_quirk {
680 	struct x86_pmu_quirk *next;
681 	void (*func)(void);
682 };
683 
684 union x86_pmu_config {
685 	struct {
686 		u64 event:8,
687 		    umask:8,
688 		    usr:1,
689 		    os:1,
690 		    edge:1,
691 		    pc:1,
692 		    interrupt:1,
693 		    __reserved1:1,
694 		    en:1,
695 		    inv:1,
696 		    cmask:8,
697 		    event2:4,
698 		    __reserved2:4,
699 		    go:1,
700 		    ho:1;
701 	} bits;
702 	u64 value;
703 };
704 
705 #define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
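/*
 * Usage sketch: X86_CONFIG() assembles a raw event value from named bit
 * fields, e.g. an inverted count-mask encoding along the lines of
 *
 *	X86_CONFIG(.event = 0xc0, .umask = 0x01, .inv = 1, .cmask = 16)
 *
 * which keeps tables of alias/quirk encodings readable.
 */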
706 
707 enum {
708 	x86_lbr_exclusive_lbr,
709 	x86_lbr_exclusive_bts,
710 	x86_lbr_exclusive_pt,
711 	x86_lbr_exclusive_max,
712 };
713 
714 #define PERF_PEBS_DATA_SOURCE_MAX	0x100
715 #define PERF_PEBS_DATA_SOURCE_MASK	(PERF_PEBS_DATA_SOURCE_MAX - 1)
716 #define PERF_PEBS_DATA_SOURCE_GRT_MAX	0x10
717 #define PERF_PEBS_DATA_SOURCE_GRT_MASK	(PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)
718 
719 #define X86_HYBRID_PMU_ATOM_IDX		0
720 #define X86_HYBRID_PMU_CORE_IDX		1
721 #define X86_HYBRID_PMU_TINY_IDX		2
722 
723 enum hybrid_pmu_type {
724 	not_hybrid,
725 	hybrid_small		= BIT(X86_HYBRID_PMU_ATOM_IDX),
726 	hybrid_big		= BIT(X86_HYBRID_PMU_CORE_IDX),
727 	hybrid_tiny		= BIT(X86_HYBRID_PMU_TINY_IDX),
728 
729 	/* The values below are only used for matching */
730 	hybrid_big_small	= hybrid_big   | hybrid_small,
731 	hybrid_small_tiny	= hybrid_small | hybrid_tiny,
732 	hybrid_big_small_tiny	= hybrid_big   | hybrid_small_tiny,
733 };
734 
735 struct arch_pebs_cap {
736 	u64 caps;
737 	u64 counters;
738 	u64 pdists;
739 };
740 
741 struct x86_hybrid_pmu {
742 	struct pmu			pmu;
743 	const char			*name;
744 	enum hybrid_pmu_type		pmu_type;
745 	cpumask_t			supported_cpus;
746 	union perf_capabilities		intel_cap;
747 	u64				intel_ctrl;
748 	u64				pebs_events_mask;
749 	u64				config_mask;
750 	union {
751 			u64		cntr_mask64;
752 			unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
753 	};
754 	union {
755 			u64		fixed_cntr_mask64;
756 			unsigned long	fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
757 	};
758 
759 	union {
760 			u64		acr_cntr_mask64;
761 			unsigned long	acr_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
762 	};
763 	union {
764 			u64		acr_cause_mask64;
765 			unsigned long	acr_cause_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
766 	};
767 	struct event_constraint		unconstrained;
768 
769 	u64				hw_cache_event_ids
770 					[PERF_COUNT_HW_CACHE_MAX]
771 					[PERF_COUNT_HW_CACHE_OP_MAX]
772 					[PERF_COUNT_HW_CACHE_RESULT_MAX];
773 	u64				hw_cache_extra_regs
774 					[PERF_COUNT_HW_CACHE_MAX]
775 					[PERF_COUNT_HW_CACHE_OP_MAX]
776 					[PERF_COUNT_HW_CACHE_RESULT_MAX];
777 	struct event_constraint		*event_constraints;
778 	struct event_constraint		*pebs_constraints;
779 	struct extra_reg		*extra_regs;
780 
781 	unsigned int			late_ack	:1,
782 					mid_ack		:1,
783 					enabled_ack	:1;
784 
785 	struct arch_pebs_cap		arch_pebs_cap;
786 
787 	u64				pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX];
788 };
789 
790 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
791 {
792 	return container_of(pmu, struct x86_hybrid_pmu, pmu);
793 }
794 
795 extern struct static_key_false perf_is_hybrid;
796 #define is_hybrid()		static_branch_unlikely(&perf_is_hybrid)
797 
798 #define hybrid(_pmu, _field)				\
799 (*({							\
800 	typeof(&x86_pmu._field) __Fp = &x86_pmu._field;	\
801 							\
802 	if (is_hybrid() && (_pmu))			\
803 		__Fp = &hybrid_pmu(_pmu)->_field;	\
804 							\
805 	__Fp;						\
806 }))
807 
808 #define hybrid_var(_pmu, _var)				\
809 (*({							\
810 	typeof(&_var) __Fp = &_var;			\
811 							\
812 	if (is_hybrid() && (_pmu))			\
813 		__Fp = &hybrid_pmu(_pmu)->_var;		\
814 							\
815 	__Fp;						\
816 }))
817 
818 #define hybrid_bit(_pmu, _field)			\
819 ({							\
820 	bool __Fp = x86_pmu._field;			\
821 							\
822 	if (is_hybrid() && (_pmu))			\
823 		__Fp = hybrid_pmu(_pmu)->_field;	\
824 							\
825 	__Fp;						\
826 })
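/*
 * Usage sketch: the hybrid*() accessors transparently pick the per-PMU copy
 * of a field on hybrid parts and fall back to the global x86_pmu copy
 * everywhere else, e.g.:
 *
 *	u64 cntrs = hybrid(event->pmu, cntr_mask64);
 *	int fixed = hweight64(hybrid(pmu, fixed_cntr_mask64));
 */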
827 
828 /*
829  * struct x86_pmu - generic x86 pmu
830  */
831 struct x86_pmu {
832 	/*
833 	 * Generic x86 PMC bits
834 	 */
835 	const char	*name;
836 	int		version;
837 	int		(*handle_irq)(struct pt_regs *);
838 	void		(*disable_all)(void);
839 	void		(*enable_all)(int added);
840 	void		(*enable)(struct perf_event *);
841 	void		(*disable)(struct perf_event *);
842 	void		(*assign)(struct perf_event *event, int idx);
843 	void		(*add)(struct perf_event *);
844 	void		(*del)(struct perf_event *);
845 	void		(*read)(struct perf_event *event);
846 	int		(*set_period)(struct perf_event *event);
847 	u64		(*update)(struct perf_event *event);
848 	int		(*hw_config)(struct perf_event *event);
849 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
850 	void		(*late_setup)(void);
851 	void		(*pebs_enable)(struct perf_event *event);
852 	void		(*pebs_disable)(struct perf_event *event);
853 	void		(*pebs_enable_all)(void);
854 	void		(*pebs_disable_all)(void);
855 	unsigned	eventsel;
856 	unsigned	perfctr;
857 	unsigned	fixedctr;
858 	int		(*addr_offset)(int index, bool eventsel);
859 	int		(*rdpmc_index)(int index);
860 	u64		(*event_map)(int);
861 	int		max_events;
862 	u64		config_mask;
863 	union {
864 			u64		cntr_mask64;
865 			unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
866 	};
867 	union {
868 			u64		fixed_cntr_mask64;
869 			unsigned long	fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
870 	};
871 	union {
872 			u64		acr_cntr_mask64;
873 			unsigned long	acr_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
874 	};
875 	union {
876 			u64		acr_cause_mask64;
877 			unsigned long	acr_cause_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
878 	};
879 	int		cntval_bits;
880 	u64		cntval_mask;
881 	union {
882 			unsigned long events_maskl;
883 			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
884 	};
885 	int		events_mask_len;
886 	int		apic;
887 	u64		max_period;
888 	struct event_constraint *
889 			(*get_event_constraints)(struct cpu_hw_events *cpuc,
890 						 int idx,
891 						 struct perf_event *event);
892 
893 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
894 						 struct perf_event *event);
895 
896 	void		(*start_scheduling)(struct cpu_hw_events *cpuc);
897 
898 	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
899 
900 	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);
901 
902 	struct event_constraint *event_constraints;
903 	struct x86_pmu_quirk *quirks;
904 	void		(*limit_period)(struct perf_event *event, s64 *l);
905 
906 	/* PMI handler bits */
907 	unsigned int	late_ack		:1,
908 			mid_ack			:1,
909 			enabled_ack		:1;
910 	/*
911 	 * sysfs attrs
912 	 */
913 	int		attr_rdpmc_broken;
914 	int		attr_rdpmc;
915 	struct attribute **format_attrs;
916 
917 	ssize_t		(*events_sysfs_show)(char *page, u64 config);
918 	const struct attribute_group **attr_update;
919 
920 	unsigned long	attr_freeze_on_smi;
921 
922 	/*
923 	 * CPU Hotplug hooks
924 	 */
925 	int		(*cpu_prepare)(int cpu);
926 	void		(*cpu_starting)(int cpu);
927 	void		(*cpu_dying)(int cpu);
928 	void		(*cpu_dead)(int cpu);
929 
930 	void		(*check_microcode)(void);
931 	void		(*sched_task)(struct perf_event_pmu_context *pmu_ctx,
932 				      struct task_struct *task, bool sched_in);
933 
934 	/*
935 	 * Intel Arch Perfmon v2+
936 	 */
937 	u64			intel_ctrl;
938 	union perf_capabilities intel_cap;
939 
940 	/*
941 	 * Intel DebugStore and PEBS bits
942 	 */
943 	unsigned int	bts			:1,
944 			bts_active		:1,
945 			ds_pebs			:1,
946 			pebs_active		:1,
947 			pebs_broken		:1,
948 			pebs_prec_dist		:1,
949 			pebs_no_tlb		:1,
950 			pebs_no_isolation	:1,
951 			pebs_block		:1,
952 			pebs_ept		:1,
953 			arch_pebs		:1;
954 	int		pebs_record_size;
955 	int		pebs_buffer_size;
956 	u64		pebs_events_mask;
957 	void		(*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
958 	struct event_constraint *pebs_constraints;
959 	void		(*pebs_aliases)(struct perf_event *event);
960 	u64		(*pebs_latency_data)(struct perf_event *event, u64 status);
961 	unsigned long	large_pebs_flags;
962 	u64		rtm_abort_event;
963 	u64		pebs_capable;
964 
965 	/*
966 	 * Intel Architectural PEBS
967 	 */
968 	struct arch_pebs_cap arch_pebs_cap;
969 
970 	/*
971 	 * Intel LBR
972 	 */
973 	unsigned int	lbr_tos, lbr_from, lbr_to,
974 			lbr_info, lbr_nr;	   /* LBR base regs and size */
975 	union {
976 		u64	lbr_sel_mask;		   /* LBR_SELECT valid bits */
977 		u64	lbr_ctl_mask;		   /* LBR_CTL valid bits */
978 	};
979 	union {
980 		const int	*lbr_sel_map;	   /* lbr_select mappings */
981 		int		*lbr_ctl_map;	   /* LBR_CTL mappings */
982 	};
983 	u64		lbr_callstack_users;	   /* lbr callstack system wide users */
984 	bool		lbr_double_abort;	   /* duplicated lbr aborts */
985 	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */
986 
987 	unsigned int	lbr_has_info:1;
988 	unsigned int	lbr_has_tsx:1;
989 	unsigned int	lbr_from_flags:1;
990 	unsigned int	lbr_to_cycles:1;
991 
992 	/*
993 	 * Intel Architectural LBR CPUID Enumeration
994 	 */
995 	unsigned int	lbr_depth_mask:8;
996 	unsigned int	lbr_deep_c_reset:1;
997 	unsigned int	lbr_lip:1;
998 	unsigned int	lbr_cpl:1;
999 	unsigned int	lbr_filter:1;
1000 	unsigned int	lbr_call_stack:1;
1001 	unsigned int	lbr_mispred:1;
1002 	unsigned int	lbr_timed_lbr:1;
1003 	unsigned int	lbr_br_type:1;
1004 	unsigned int	lbr_counters:4;
1005 
1006 	void		(*lbr_reset)(void);
1007 	void		(*lbr_read)(struct cpu_hw_events *cpuc);
1008 	void		(*lbr_save)(void *ctx);
1009 	void		(*lbr_restore)(void *ctx);
1010 
1011 	/*
1012 	 * Intel PT/LBR/BTS are exclusive
1013 	 */
1014 	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];
1015 
1016 	/*
1017 	 * Intel perf metrics
1018 	 */
1019 	int		num_topdown_events;
1020 
1021 	/*
1022 	 * AMD bits
1023 	 */
1024 	unsigned int	amd_nb_constraints : 1;
1025 	u64		perf_ctr_pair_en;
1026 
1027 	/*
1028 	 * Extra registers for events
1029 	 */
1030 	struct extra_reg *extra_regs;
1031 	unsigned int flags;
1032 
1033 	/*
1034 	 * Intel host/guest support (KVM)
1035 	 */
1036 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr, void *data);
1037 
1038 	/*
1039 	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
1040 	 */
1041 	int (*check_period) (struct perf_event *event, u64 period);
1042 
1043 	int (*aux_output_match) (struct perf_event *event);
1044 
1045 	void (*filter)(struct pmu *pmu, int cpu, bool *ret);
1046 	/*
1047 	 * Hybrid support
1048 	 *
1049 	 * Most PMU capabilities are the same among different hybrid PMUs.
1050 	 * The global x86_pmu saves the architecture capabilities, which
1051 	 * are available for all PMUs. The hybrid_pmu only includes the
1052 	 * unique capabilities.
1053 	 */
1054 	int				num_hybrid_pmus;
1055 	struct x86_hybrid_pmu		*hybrid_pmu;
1056 	enum intel_cpu_type (*get_hybrid_cpu_type)	(void);
1057 };
1058 
1059 struct x86_perf_task_context_opt {
1060 	int lbr_callstack_users;
1061 	int lbr_stack_state;
1062 	int log_id;
1063 };
1064 
1065 struct x86_perf_task_context {
1066 	u64 lbr_sel;
1067 	int tos;
1068 	int valid_lbrs;
1069 	struct x86_perf_task_context_opt opt;
1070 	struct lbr_entry lbr[MAX_LBR_ENTRIES];
1071 };
1072 
1073 struct x86_perf_task_context_arch_lbr {
1074 	struct x86_perf_task_context_opt opt;
1075 	struct lbr_entry entries[];
1076 };
1077 
1078 /*
1079  * Add padding to guarantee the 64-byte alignment of the state buffer.
1080  *
1081  * The structure is dynamically allocated. The size of the LBR state may vary
1082  * based on the number of LBR registers.
1083  *
1084  * Do not put anything after the LBR state.
1085  */
1086 struct x86_perf_task_context_arch_lbr_xsave {
1087 	struct x86_perf_task_context_opt		opt;
1088 
1089 	union {
1090 		struct xregs_state			xsave;
1091 		struct {
1092 			struct fxregs_state		i387;
1093 			struct xstate_header		header;
1094 			struct arch_lbr_state		lbr;
1095 		} __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
1096 	};
1097 };
1098 
1099 #define x86_add_quirk(func_)						\
1100 do {									\
1101 	static struct x86_pmu_quirk __quirk __initdata = {		\
1102 		.func = func_,						\
1103 	};								\
1104 	__quirk.next = x86_pmu.quirks;					\
1105 	x86_pmu.quirks = &__quirk;					\
1106 } while (0)
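/*
 * Usage sketch (function name illustrative): quirks are queued at init time
 * and run once by the core code, e.g.
 *
 *	x86_add_quirk(intel_clovertown_quirk);
 *
 * where the argument is any void (*)(void) init-time fixup.
 */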
1107 
1108 /*
1109  * x86_pmu flags
1110  */
1111 #define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
1112 #define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
1113 #define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
1114 #define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
1115 #define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
1116 #define PMU_FL_TFA		0x20 /* deal with TSX force abort */
1117 #define PMU_FL_PAIR		0x40 /* merge counters for large incr. events */
1118 #define PMU_FL_INSTR_LATENCY	0x80 /* Support Instruction Latency in PEBS Memory Info Record */
1119 #define PMU_FL_MEM_LOADS_AUX	0x100 /* Require an auxiliary event for the complete memory info */
1120 #define PMU_FL_RETIRE_LATENCY	0x200 /* Support Retire Latency in PEBS */
1121 #define PMU_FL_BR_CNTR		0x400 /* Support branch counter logging */
1122 #define PMU_FL_DYN_CONSTRAINT	0x800 /* Needs dynamic constraint */
1123 #define PMU_FL_HAS_OMR		0x1000 /* has 4 equivalent OMR regs */
1124 
1125 #define EVENT_VAR(_id)  event_attr_##_id
1126 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
1127 
1128 #define EVENT_ATTR(_name, _id)						\
1129 static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
1130 	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
1131 	.id		= PERF_COUNT_HW_##_id,				\
1132 	.event_str	= NULL,						\
1133 };
1134 
1135 #define EVENT_ATTR_STR(_name, v, str)					\
1136 static struct perf_pmu_events_attr event_attr_##v = {			\
1137 	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
1138 	.id		= 0,						\
1139 	.event_str	= str,						\
1140 };
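/*
 * Usage sketch: sysfs event aliases are typically declared as
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * and then referenced from an attribute array via EVENT_PTR(mem_ld_nhm).
 */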
1141 
1142 #define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
1143 static struct perf_pmu_events_ht_attr event_attr_##v = {		\
1144 	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
1145 	.id		= 0,						\
1146 	.event_str_noht	= noht,						\
1147 	.event_str_ht	= ht,						\
1148 }
1149 
1150 #define EVENT_ATTR_STR_HYBRID(_name, v, str, _pmu)			\
1151 static struct perf_pmu_events_hybrid_attr event_attr_##v = {		\
1152 	.attr		= __ATTR(_name, 0444, events_hybrid_sysfs_show, NULL),\
1153 	.id		= 0,						\
1154 	.event_str	= str,						\
1155 	.pmu_type	= _pmu,						\
1156 }
1157 
1158 #define FORMAT_HYBRID_PTR(_id) (&format_attr_hybrid_##_id.attr.attr)
1159 
1160 #define FORMAT_ATTR_HYBRID(_name, _pmu)					\
1161 static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\
1162 	.attr		= __ATTR_RO(_name),				\
1163 	.pmu_type	= _pmu,						\
1164 }
1165 
1166 struct pmu *x86_get_pmu(unsigned int cpu);
1167 extern struct x86_pmu x86_pmu __read_mostly;
1168 
1169 DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
1170 DECLARE_STATIC_CALL(x86_pmu_update,     *x86_pmu.update);
1171 DECLARE_STATIC_CALL(x86_pmu_drain_pebs,	*x86_pmu.drain_pebs);
1172 DECLARE_STATIC_CALL(x86_pmu_late_setup,	*x86_pmu.late_setup);
1173 DECLARE_STATIC_CALL(x86_pmu_pebs_enable, *x86_pmu.pebs_enable);
1174 DECLARE_STATIC_CALL(x86_pmu_pebs_disable, *x86_pmu.pebs_disable);
1175 DECLARE_STATIC_CALL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all);
1176 DECLARE_STATIC_CALL(x86_pmu_pebs_disable_all, *x86_pmu.pebs_disable_all);
1177 
1178 static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
1179 {
1180 	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
1181 		return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;
1182 
1183 	return &((struct x86_perf_task_context *)ctx)->opt;
1184 }
1185 
1186 static inline bool x86_pmu_has_lbr_callstack(void)
1187 {
1188 	return  x86_pmu.lbr_sel_map &&
1189 		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
1190 }
1191 
1192 DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
1193 DECLARE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1194 
1195 int x86_perf_event_set_period(struct perf_event *event);
1196 
1197 /*
1198  * Generalized hw caching related hw_event table, filled
1199  * in on a per model basis. A value of 0 means
1200  * 'not supported', -1 means 'hw_event makes no sense on
1201  * this CPU', any other value means the raw hw_event
1202  * ID.
1203  */
1204 
1205 #define C(x) PERF_COUNT_HW_CACHE_##x
1206 
1207 extern u64 __read_mostly hw_cache_event_ids
1208 				[PERF_COUNT_HW_CACHE_MAX]
1209 				[PERF_COUNT_HW_CACHE_OP_MAX]
1210 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
1211 extern u64 __read_mostly hw_cache_extra_regs
1212 				[PERF_COUNT_HW_CACHE_MAX]
1213 				[PERF_COUNT_HW_CACHE_OP_MAX]
1214 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
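/*
 * Illustrative use of the C() shorthand when a model fills these tables
 * (the raw encoding below is just the architectural LLC Misses event and
 * serves only as an example):
 *
 *	hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_MISS)] = 0x412e;
 */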
1215 
1216 u64 x86_perf_event_update(struct perf_event *event);
1217 
1218 static inline u64 intel_pmu_topdown_event_update(struct perf_event *event, u64 *val)
1219 {
1220 	return x86_perf_event_update(event);
1221 }
1222 DECLARE_STATIC_CALL(intel_pmu_update_topdown_event, intel_pmu_topdown_event_update);
1223 
1224 static inline unsigned int x86_pmu_config_addr(int index)
1225 {
1226 	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
1227 				   x86_pmu.addr_offset(index, true) : index);
1228 }
1229 
1230 static inline unsigned int x86_pmu_event_addr(int index)
1231 {
1232 	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
1233 				  x86_pmu.addr_offset(index, false) : index);
1234 }
1235 
1236 static inline unsigned int x86_pmu_fixed_ctr_addr(int index)
1237 {
1238 	return x86_pmu.fixedctr + (x86_pmu.addr_offset ?
1239 				   x86_pmu.addr_offset(index, false) : index);
1240 }
1241 
1242 static inline int x86_pmu_rdpmc_index(int index)
1243 {
1244 	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
1245 }
1246 
1247 bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
1248 		     unsigned long *fixed_cntr_mask);
1249 
1250 int x86_add_exclusive(unsigned int what);
1251 
1252 void x86_del_exclusive(unsigned int what);
1253 
1254 int x86_reserve_hardware(void);
1255 
1256 void x86_release_hardware(void);
1257 
1258 int x86_pmu_max_precise(struct pmu *pmu);
1259 
1260 void hw_perf_lbr_event_destroy(struct perf_event *event);
1261 
1262 int x86_setup_perfctr(struct perf_event *event);
1263 
1264 int x86_pmu_hw_config(struct perf_event *event);
1265 
1266 void x86_pmu_disable_all(void);
1267 
1268 static inline bool has_amd_brs(struct hw_perf_event *hwc)
1269 {
1270 	return hwc->flags & PERF_X86_EVENT_AMD_BRS;
1271 }
1272 
1273 static inline bool is_counter_pair(struct hw_perf_event *hwc)
1274 {
1275 	return hwc->flags & PERF_X86_EVENT_PAIR;
1276 }
1277 
1278 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
1279 					  u64 enable_mask)
1280 {
1281 	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
1282 
1283 	if (hwc->extra_reg.reg)
1284 		wrmsrq(hwc->extra_reg.reg, hwc->extra_reg.config);
1285 
1286 	/*
1287 	 * Add enabled Merge event on next counter
1288 	 * if large increment event being enabled on this counter
1289 	 */
1290 	if (is_counter_pair(hwc))
1291 		wrmsrq(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
1292 
1293 	wrmsrq(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
1294 }
1295 
1296 void x86_pmu_enable_all(int added);
1297 
1298 int perf_assign_events(struct event_constraint **constraints, int n,
1299 			int wmin, int wmax, int gpmax, int *assign);
1300 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
1301 
1302 void x86_pmu_stop(struct perf_event *event, int flags);
1303 
1304 static inline void x86_pmu_disable_event(struct perf_event *event)
1305 {
1306 	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
1307 	struct hw_perf_event *hwc = &event->hw;
1308 
1309 	wrmsrq(hwc->config_base, hwc->config & ~disable_mask);
1310 
1311 	if (is_counter_pair(hwc))
1312 		wrmsrq(x86_pmu_config_addr(hwc->idx + 1), 0);
1313 }
1314 
1315 void x86_pmu_enable_event(struct perf_event *event);
1316 
1317 int x86_pmu_handle_irq(struct pt_regs *regs);
1318 
1319 void x86_pmu_show_pmu_cap(struct pmu *pmu);
1320 
1321 static inline int x86_pmu_num_counters(struct pmu *pmu)
1322 {
1323 	return hweight64(hybrid(pmu, cntr_mask64));
1324 }
1325 
1326 static inline int x86_pmu_max_num_counters(struct pmu *pmu)
1327 {
1328 	return fls64(hybrid(pmu, cntr_mask64));
1329 }
1330 
1331 static inline int x86_pmu_num_counters_fixed(struct pmu *pmu)
1332 {
1333 	return hweight64(hybrid(pmu, fixed_cntr_mask64));
1334 }
1335 
1336 static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
1337 {
1338 	return fls64(hybrid(pmu, fixed_cntr_mask64));
1339 }
1340 
1341 static inline u64 x86_pmu_get_event_config(struct perf_event *event)
1342 {
1343 	return event->attr.config & hybrid(event->pmu, config_mask);
1344 }
1345 
1346 static inline bool x86_pmu_has_rdpmc_user_disable(struct pmu *pmu)
1347 {
1348 	return !!(hybrid(pmu, config_mask) &
1349 		 ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE);
1350 }
1351 
1352 extern struct event_constraint emptyconstraint;
1353 
1354 extern struct event_constraint unconstrained;
1355 
1356 static inline bool kernel_ip(unsigned long ip)
1357 {
1358 #ifdef CONFIG_X86_32
1359 	return ip > PAGE_OFFSET;
1360 #else
1361 	return (long)ip < 0;
1362 #endif
1363 }
1364 
1365 /*
1366  * Not all PMUs provide the right context information to place the reported IP
1367  * into full context. Specifically segment registers are typically not
1368  * supplied.
1369  *
1370  * Assuming the address is a linear address (it is for IBS), we fake the CS and
1371  * vm86 mode using the known zero-based code segment and 'fix up' the registers
1372  * to reflect this.
1373  *
1374  * Intel PEBS/LBR appear to typically provide the effective address, nothing
1375  * much we can do about that but pray and treat it like a linear address.
1376  */
1377 static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
1378 {
1379 	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
1380 	if (regs->flags & X86_VM_MASK)
1381 		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
1382 	regs->ip = ip;
1383 }
1384 
1385 /*
1386  * x86 control flow change classification
1387  * x86 control flow changes include branches, interrupts, traps, faults
1388  */
1389 enum {
1390 	X86_BR_NONE		= 0,      /* unknown */
1391 
1392 	X86_BR_USER		= 1 << 0, /* branch target is user */
1393 	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */
1394 
1395 	X86_BR_CALL		= 1 << 2, /* call */
1396 	X86_BR_RET		= 1 << 3, /* return */
1397 	X86_BR_SYSCALL		= 1 << 4, /* syscall */
1398 	X86_BR_SYSRET		= 1 << 5, /* syscall return */
1399 	X86_BR_INT		= 1 << 6, /* sw interrupt */
1400 	X86_BR_IRET		= 1 << 7, /* return from interrupt */
1401 	X86_BR_JCC		= 1 << 8, /* conditional */
1402 	X86_BR_JMP		= 1 << 9, /* jump */
1403 	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
1404 	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
1405 	X86_BR_ABORT		= 1 << 12,/* transaction abort */
1406 	X86_BR_IN_TX		= 1 << 13,/* in transaction */
1407 	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
1408 	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
1409 	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
1410 	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
1411 
1412 	X86_BR_TYPE_SAVE	= 1 << 18,/* indicate to save branch type */
1413 
1414 };
1415 
1416 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
1417 #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
1418 
1419 #define X86_BR_ANY       \
1420 	(X86_BR_CALL    |\
1421 	 X86_BR_RET     |\
1422 	 X86_BR_SYSCALL |\
1423 	 X86_BR_SYSRET  |\
1424 	 X86_BR_INT     |\
1425 	 X86_BR_IRET    |\
1426 	 X86_BR_JCC     |\
1427 	 X86_BR_JMP	 |\
1428 	 X86_BR_IRQ	 |\
1429 	 X86_BR_ABORT	 |\
1430 	 X86_BR_IND_CALL |\
1431 	 X86_BR_IND_JMP  |\
1432 	 X86_BR_ZERO_CALL)
1433 
1434 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
1435 
1436 #define X86_BR_ANY_CALL		 \
1437 	(X86_BR_CALL		|\
1438 	 X86_BR_IND_CALL	|\
1439 	 X86_BR_ZERO_CALL	|\
1440 	 X86_BR_SYSCALL		|\
1441 	 X86_BR_IRQ		|\
1442 	 X86_BR_INT)
1443 
1444 int common_branch_type(int type);
1445 int branch_type(unsigned long from, unsigned long to, int abort);
1446 int branch_type_fused(unsigned long from, unsigned long to, int abort,
1447 		      int *offset);
1448 
1449 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
1450 ssize_t intel_event_sysfs_show(char *page, u64 config);
1451 
1452 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
1453 			  char *page);
1454 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
1455 			  char *page);
1456 ssize_t events_hybrid_sysfs_show(struct device *dev,
1457 				 struct device_attribute *attr,
1458 				 char *page);
1459 
1460 static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
1461 {
1462 	u64 intel_ctrl = hybrid(pmu, intel_ctrl);
1463 
1464 	return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
1465 }
1466 
1467 #ifdef CONFIG_CPU_SUP_AMD
1468 
1469 int amd_pmu_init(void);
1470 
1471 int amd_pmu_lbr_init(void);
1472 void amd_pmu_lbr_reset(void);
1473 void amd_pmu_lbr_read(void);
1474 void amd_pmu_lbr_add(struct perf_event *event);
1475 void amd_pmu_lbr_del(struct perf_event *event);
1476 void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
1477 			    struct task_struct *task, bool sched_in);
1478 void amd_pmu_lbr_enable_all(void);
1479 void amd_pmu_lbr_disable_all(void);
1480 int amd_pmu_lbr_hw_config(struct perf_event *event);
1481 
1482 static __always_inline void __amd_pmu_lbr_disable(void)
1483 {
1484 	u64 dbg_ctl, dbg_extn_cfg;
1485 
1486 	rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
1487 	wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
1488 
1489 	if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
1490 		rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
1491 		wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
1492 	}
1493 }
1494 
1495 #ifdef CONFIG_PERF_EVENTS_AMD_BRS
1496 
1497 #define AMD_FAM19H_BRS_EVENT 0xc4 /* RETIRED_TAKEN_BRANCH_INSTRUCTIONS */
1498 
1499 int amd_brs_init(void);
1500 void amd_brs_disable(void);
1501 void amd_brs_enable(void);
1502 void amd_brs_enable_all(void);
1503 void amd_brs_disable_all(void);
1504 void amd_brs_drain(void);
1505 void amd_brs_lopwr_init(void);
1506 int amd_brs_hw_config(struct perf_event *event);
1507 void amd_brs_reset(void);
1508 
1509 static inline void amd_pmu_brs_add(struct perf_event *event)
1510 {
1511 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1512 
1513 	perf_sched_cb_inc(event->pmu);
1514 	cpuc->lbr_users++;
1515 	/*
1516 	 * No need to reset BRS because it is reset
1517 	 * on brs_enable() and it is saturating
1518 	 */
1519 }
1520 
1521 static inline void amd_pmu_brs_del(struct perf_event *event)
1522 {
1523 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1524 
1525 	cpuc->lbr_users--;
1526 	WARN_ON_ONCE(cpuc->lbr_users < 0);
1527 
1528 	perf_sched_cb_dec(event->pmu);
1529 }
1530 
1531 void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
1532 			    struct task_struct *task, bool sched_in);
1533 #else
1534 static inline int amd_brs_init(void)
1535 {
1536 	return 0;
1537 }
1538 static inline void amd_brs_disable(void) {}
1539 static inline void amd_brs_enable(void) {}
1540 static inline void amd_brs_drain(void) {}
1541 static inline void amd_brs_lopwr_init(void) {}
1542 static inline void amd_brs_disable_all(void) {}
1543 static inline int amd_brs_hw_config(struct perf_event *event)
1544 {
1545 	return 0;
1546 }
1547 static inline void amd_brs_reset(void) {}
1548 
1549 static inline void amd_pmu_brs_add(struct perf_event *event)
1550 {
1551 }
1552 
1553 static inline void amd_pmu_brs_del(struct perf_event *event)
1554 {
1555 }
1556 
1557 static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
1558 					  struct task_struct *task, bool sched_in)
1559 {
1560 }
1561 
1562 static inline void amd_brs_enable_all(void)
1563 {
1564 }
1565 
1566 #endif
1567 
1568 #else /* CONFIG_CPU_SUP_AMD */
1569 
1570 static inline int amd_pmu_init(void)
1571 {
1572 	return 0;
1573 }
1574 
1575 static inline int amd_brs_init(void)
1576 {
1577 	return -EOPNOTSUPP;
1578 }
1579 
1580 static inline void amd_brs_drain(void)
1581 {
1582 }
1583 
1584 static inline void amd_brs_enable_all(void)
1585 {
1586 }
1587 
1588 static inline void amd_brs_disable_all(void)
1589 {
1590 }
1591 #endif /* CONFIG_CPU_SUP_AMD */
1592 
1593 static inline int is_pebs_pt(struct perf_event *event)
1594 {
1595 	return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
1596 }
1597 
1598 #ifdef CONFIG_CPU_SUP_INTEL
1599 
1600 static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
1601 {
1602 	struct hw_perf_event *hwc = &event->hw;
1603 	unsigned int hw_event, bts_event;
1604 
1605 	/*
1606 	 * Only use BTS for fixed rate period==1 events.
1607 	 */
1608 	if (event->attr.freq || period != 1)
1609 		return false;
1610 
1611 	/*
1612 	 * BTS doesn't virtualize.
1613 	 */
1614 	if (event->attr.exclude_host)
1615 		return false;
1616 
1617 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1618 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1619 
1620 	return hw_event == bts_event;
1621 }
1622 
1623 static inline bool intel_pmu_has_bts(struct perf_event *event)
1624 {
1625 	struct hw_perf_event *hwc = &event->hw;
1626 
1627 	return intel_pmu_has_bts_period(event, hwc->sample_period);
1628 }
1629 
1630 static __always_inline void __intel_pmu_pebs_disable_all(void)
1631 {
1632 	wrmsrq(MSR_IA32_PEBS_ENABLE, 0);
1633 }
1634 
1635 static __always_inline void __intel_pmu_arch_lbr_disable(void)
1636 {
1637 	wrmsrq(MSR_ARCH_LBR_CTL, 0);
1638 }
1639 
1640 static __always_inline void __intel_pmu_lbr_disable(void)
1641 {
1642 	u64 debugctl;
1643 
1644 	rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
1645 	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
1646 	wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
1647 }
1648 
1649 int intel_pmu_save_and_restart(struct perf_event *event);
1650 
1651 struct event_constraint *
1652 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
1653 			  struct perf_event *event);
1654 
1655 extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
1656 extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
1657 
1658 int intel_pmu_init(void);
1659 
1660 int alloc_arch_pebs_buf_on_cpu(int cpu);
1661 
1662 void release_arch_pebs_buf_on_cpu(int cpu);
1663 
1664 void init_arch_pebs_on_cpu(int cpu);
1665 
1666 void fini_arch_pebs_on_cpu(int cpu);
1667 
1668 void init_debug_store_on_cpu(int cpu);
1669 
1670 void fini_debug_store_on_cpu(int cpu);
1671 
1672 void release_ds_buffers(void);
1673 
1674 void reserve_ds_buffers(void);
1675 
1676 void release_lbr_buffers(void);
1677 
1678 void reserve_lbr_buffers(void);
1679 
1680 extern struct event_constraint bts_constraint;
1681 extern struct event_constraint vlbr_constraint;
1682 
1683 void intel_pmu_enable_bts(u64 config);
1684 
1685 void intel_pmu_disable_bts(void);
1686 
1687 int intel_pmu_drain_bts_buffer(void);
1688 
1689 void intel_pmu_late_setup(void);
1690 
1691 u64 grt_latency_data(struct perf_event *event, u64 status);
1692 
1693 u64 cmt_latency_data(struct perf_event *event, u64 status);
1694 
1695 u64 lnl_latency_data(struct perf_event *event, u64 status);
1696 
1697 u64 arl_h_latency_data(struct perf_event *event, u64 status);
1698 
1699 u64 pnc_latency_data(struct perf_event *event, u64 status);
1700 
1701 u64 nvl_latency_data(struct perf_event *event, u64 status);
1702 
1703 extern struct event_constraint intel_core2_pebs_event_constraints[];
1704 
1705 extern struct event_constraint intel_atom_pebs_event_constraints[];
1706 
1707 extern struct event_constraint intel_slm_pebs_event_constraints[];
1708 
1709 extern struct event_constraint intel_glm_pebs_event_constraints[];
1710 
1711 extern struct event_constraint intel_glp_pebs_event_constraints[];
1712 
1713 extern struct event_constraint intel_grt_pebs_event_constraints[];
1714 
1715 extern struct event_constraint intel_arw_pebs_event_constraints[];
1716 
1717 extern struct event_constraint intel_nehalem_pebs_event_constraints[];
1718 
1719 extern struct event_constraint intel_westmere_pebs_event_constraints[];
1720 
1721 extern struct event_constraint intel_snb_pebs_event_constraints[];
1722 
1723 extern struct event_constraint intel_ivb_pebs_event_constraints[];
1724 
1725 extern struct event_constraint intel_hsw_pebs_event_constraints[];
1726 
1727 extern struct event_constraint intel_bdw_pebs_event_constraints[];
1728 
1729 extern struct event_constraint intel_skl_pebs_event_constraints[];
1730 
1731 extern struct event_constraint intel_icl_pebs_event_constraints[];
1732 
1733 extern struct event_constraint intel_glc_pebs_event_constraints[];
1734 
1735 extern struct event_constraint intel_lnc_pebs_event_constraints[];
1736 
1737 extern struct event_constraint intel_pnc_pebs_event_constraints[];
1738 
1739 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
1740 
1741 void intel_pmu_pebs_add(struct perf_event *event);
1742 
1743 void intel_pmu_pebs_del(struct perf_event *event);
1744 
1745 void intel_pmu_pebs_enable(struct perf_event *event);
1746 
1747 void intel_pmu_pebs_disable(struct perf_event *event);
1748 
1749 void intel_pmu_pebs_enable_all(void);
1750 
1751 void intel_pmu_pebs_disable_all(void);
1752 
1753 void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
1754 
1755 void intel_pmu_pebs_late_setup(struct cpu_hw_events *cpuc);
1756 
1757 void intel_pmu_drain_pebs_buffer(void);
1758 
1759 void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
1760 
1761 void intel_pebs_init(void);
1762 
1763 void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
1764 				struct cpu_hw_events *cpuc,
1765 				struct perf_event *event);
1766 
1767 void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
1768 			      struct task_struct *task, bool sched_in);
1769 
1770 u64 lbr_from_signext_quirk_wr(u64 val);
1771 
1772 void intel_pmu_lbr_reset(void);
1773 
1774 void intel_pmu_lbr_reset_32(void);
1775 
1776 void intel_pmu_lbr_reset_64(void);
1777 
1778 void intel_pmu_lbr_add(struct perf_event *event);
1779 
1780 void intel_pmu_lbr_del(struct perf_event *event);
1781 
1782 void intel_pmu_lbr_enable_all(bool pmi);
1783 
1784 void intel_pmu_lbr_disable_all(void);
1785 
1786 void intel_pmu_lbr_read(void);
1787 
1788 void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);
1789 
1790 void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);
1791 
1792 void intel_pmu_lbr_save(void *ctx);
1793 
1794 void intel_pmu_lbr_restore(void *ctx);
1795 
1796 void intel_pmu_lbr_init_core(void);
1797 
1798 void intel_pmu_lbr_init_nhm(void);
1799 
1800 void intel_pmu_lbr_init_atom(void);
1801 
1802 void intel_pmu_lbr_init_slm(void);
1803 
1804 void intel_pmu_lbr_init_snb(void);
1805 
1806 void intel_pmu_lbr_init_hsw(void);
1807 
1808 void intel_pmu_lbr_init_skl(void);
1809 
1810 void intel_pmu_lbr_init_knl(void);
1811 
1812 void intel_pmu_lbr_init(void);
1813 
1814 void intel_pmu_arch_lbr_init(void);
1815 
1816 void intel_pmu_pebs_data_source_nhm(void);
1817 
1818 void intel_pmu_pebs_data_source_skl(bool pmem);
1819 
1820 void intel_pmu_pebs_data_source_adl(void);
1821 
1822 void intel_pmu_pebs_data_source_grt(void);
1823 
1824 void intel_pmu_pebs_data_source_mtl(void);
1825 
1826 void intel_pmu_pebs_data_source_arl_h(void);
1827 
1828 void intel_pmu_pebs_data_source_cmt(void);
1829 
1830 void intel_pmu_pebs_data_source_lnl(void);
1831 
1832 u64 intel_get_arch_pebs_data_config(struct perf_event *event);
1833 
1834 int intel_pmu_setup_lbr_filter(struct perf_event *event);
1835 
1836 void intel_pt_interrupt(void);
1837 
1838 int intel_bts_interrupt(void);
1839 
1840 void intel_bts_enable_local(void);
1841 
1842 void intel_bts_disable_local(void);
1843 
1844 int p4_pmu_init(void);
1845 
1846 int p6_pmu_init(void);
1847 
1848 int knc_pmu_init(void);
1849 
1850 static inline int is_ht_workaround_enabled(void)
1851 {
1852 	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
1853 }
1854 
1855 static inline u64 intel_pmu_pebs_mask(u64 cntr_mask)
1856 {
1857 	return MAX_PEBS_EVENTS_MASK & cntr_mask;
1858 }
1859 
1860 static inline int intel_pmu_max_num_pebs(struct pmu *pmu)
1861 {
1862 	static_assert(MAX_PEBS_EVENTS == 32);
1863 	return fls((u32)hybrid(pmu, pebs_events_mask));
1864 }
1865 
1866 static inline bool intel_pmu_has_pebs(void)
1867 {
1868 	return x86_pmu.ds_pebs || x86_pmu.arch_pebs;
1869 }
1870 
1871 #else /* CONFIG_CPU_SUP_INTEL */
1872 
1873 static inline void reserve_ds_buffers(void)
1874 {
1875 }
1876 
1877 static inline void release_ds_buffers(void)
1878 {
1879 }
1880 
1881 static inline void release_lbr_buffers(void)
1882 {
1883 }
1884 
1885 static inline void reserve_lbr_buffers(void)
1886 {
1887 }
1888 
1889 static inline int intel_pmu_init(void)
1890 {
1891 	return 0;
1892 }
1893 
1894 static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
1895 {
1896 	return 0;
1897 }
1898 
1899 static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
1900 {
1901 }
1902 
1903 static inline int is_ht_workaround_enabled(void)
1904 {
1905 	return 0;
1906 }
1907 #endif /* CONFIG_CPU_SUP_INTEL */
1908 
1909 #if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
1910 int zhaoxin_pmu_init(void);
1911 #else
1912 static inline int zhaoxin_pmu_init(void)
1913 {
1914 	return 0;
1915 }
1916 #endif /*CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN*/
1917