1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /*
3  * Performance events:
4  *
5  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
6  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
7  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
8  *
9  * Data type definitions, declarations, prototypes.
10  *
11  *    Started by: Thomas Gleixner and Ingo Molnar
12  *
13  * For licensing details see kernel-base/COPYING
14  */
15 #ifndef _UAPI_LINUX_PERF_EVENT_H
16 #define _UAPI_LINUX_PERF_EVENT_H
17 
18 #include <linux/types.h>
19 #include <linux/ioctl.h>
20 #include <asm/byteorder.h>
21 
22 /*
23  * User-space ABI bits:
24  */
25 
26 /*
27  * attr.type
28  */
29 enum perf_type_id {
30 	PERF_TYPE_HARDWARE			= 0,
31 	PERF_TYPE_SOFTWARE			= 1,
32 	PERF_TYPE_TRACEPOINT			= 2,
33 	PERF_TYPE_HW_CACHE			= 3,
34 	PERF_TYPE_RAW				= 4,
35 	PERF_TYPE_BREAKPOINT			= 5,
36 
37 	PERF_TYPE_MAX,				/* non-ABI */
38 };
39 
40 /*
41  * attr.config layout for types PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
42  * PERF_TYPE_HARDWARE:			0xEEEEEEEE000000AA
43  *					AA: hardware event ID
44  *					EEEEEEEE: PMU type ID
45  * PERF_TYPE_HW_CACHE:			0xEEEEEEEE00DDCCBB
46  *					BB: hardware cache ID
47  *					CC: hardware cache op ID
48  *					DD: hardware cache op result ID
49  *					EEEEEEEE: PMU type ID
50  * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
51  */
52 #define PERF_PMU_TYPE_SHIFT		32
53 #define PERF_HW_EVENT_MASK		0xffffffff
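
/*
 * Example (illustrative sketch, not part of the ABI): composing attr.config
 * for a hardware event on a specific PMU instance, following the layout
 * above. "pmu_type" is an assumption standing for the value read from
 * /sys/bus/event_source/devices/<pmu>/type.
 *
 *   struct perf_event_attr attr = { 0 };
 *
 *   attr.type   = PERF_TYPE_HARDWARE;
 *   attr.size   = sizeof(attr);
 *   attr.config = ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
 *                 (PERF_COUNT_HW_CPU_CYCLES & PERF_HW_EVENT_MASK);
 */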
54 
55 /*
56  * Generalized performance event_id types, used by the
57  * attr.config parameter of the sys_perf_event_open()
58  * syscall:
59  */
60 enum perf_hw_id {
61 	/*
62 	 * Common hardware events, generalized by the kernel:
63 	 */
64 	PERF_COUNT_HW_CPU_CYCLES		= 0,
65 	PERF_COUNT_HW_INSTRUCTIONS		= 1,
66 	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
67 	PERF_COUNT_HW_CACHE_MISSES		= 3,
68 	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
69 	PERF_COUNT_HW_BRANCH_MISSES		= 5,
70 	PERF_COUNT_HW_BUS_CYCLES		= 6,
71 	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
72 	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
73 	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,
74 
75 	PERF_COUNT_HW_MAX,			/* non-ABI */
76 };
77 
78 /*
79  * Generalized hardware cache events:
80  *
81  *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
82  *       { read, write, prefetch } x
83  *       { accesses, misses }
84  */
85 enum perf_hw_cache_id {
86 	PERF_COUNT_HW_CACHE_L1D			= 0,
87 	PERF_COUNT_HW_CACHE_L1I			= 1,
88 	PERF_COUNT_HW_CACHE_LL			= 2,
89 	PERF_COUNT_HW_CACHE_DTLB		= 3,
90 	PERF_COUNT_HW_CACHE_ITLB		= 4,
91 	PERF_COUNT_HW_CACHE_BPU			= 5,
92 	PERF_COUNT_HW_CACHE_NODE		= 6,
93 
94 	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
95 };
96 
97 enum perf_hw_cache_op_id {
98 	PERF_COUNT_HW_CACHE_OP_READ		= 0,
99 	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
100 	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,
101 
102 	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
103 };
104 
105 enum perf_hw_cache_op_result_id {
106 	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
107 	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,
108 
109 	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
110 };
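
/*
 * Example (illustrative, matching the BB/CC/DD layout described above): a
 * hardware cache event is encoded as
 *
 *   attr.config = (perf_hw_cache_id) |
 *                 (perf_hw_cache_op_id << 8) |
 *                 (perf_hw_cache_op_result_id << 16);
 *
 * e.g. L1 data cache read misses:
 *
 *   attr.type   = PERF_TYPE_HW_CACHE;
 *   attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */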
111 
112 /*
113  * Special "software" events provided by the kernel, even if the hardware
114  * does not support performance events. These events measure various
115  * physical and software events of the kernel (and allow them to be
116  * profiled as well):
117  */
118 enum perf_sw_ids {
119 	PERF_COUNT_SW_CPU_CLOCK			= 0,
120 	PERF_COUNT_SW_TASK_CLOCK		= 1,
121 	PERF_COUNT_SW_PAGE_FAULTS		= 2,
122 	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
123 	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
124 	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
125 	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
126 	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
127 	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
128 	PERF_COUNT_SW_DUMMY			= 9,
129 	PERF_COUNT_SW_BPF_OUTPUT		= 10,
130 	PERF_COUNT_SW_CGROUP_SWITCHES		= 11,
131 
132 	PERF_COUNT_SW_MAX,			/* non-ABI */
133 };
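
/*
 * Example (illustrative sketch, not part of the ABI; error handling and the
 * usual userspace headers are omitted): counting task clock for the calling
 * thread. perf_event_open() has no glibc wrapper, so it is invoked via
 * syscall(__NR_perf_event_open, ...). The workload placeholder is an
 * assumption.
 *
 *   struct perf_event_attr attr = { 0 };
 *   __u64 count;
 *   int fd;
 *
 *   attr.type     = PERF_TYPE_SOFTWARE;
 *   attr.size     = sizeof(attr);
 *   attr.config   = PERF_COUNT_SW_TASK_CLOCK;
 *   attr.disabled = 1;
 *
 *   fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *   ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *   ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *   ... workload under measurement ...
 *   ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *   read(fd, &count, sizeof(count));       (count is in nanoseconds)
 */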
134 
135 /*
136  * Bits that can be set in attr.sample_type to request information
137  * in the overflow packets.
138  */
139 enum perf_event_sample_format {
140 	PERF_SAMPLE_IP				= 1U << 0,
141 	PERF_SAMPLE_TID				= 1U << 1,
142 	PERF_SAMPLE_TIME			= 1U << 2,
143 	PERF_SAMPLE_ADDR			= 1U << 3,
144 	PERF_SAMPLE_READ			= 1U << 4,
145 	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
146 	PERF_SAMPLE_ID				= 1U << 6,
147 	PERF_SAMPLE_CPU				= 1U << 7,
148 	PERF_SAMPLE_PERIOD			= 1U << 8,
149 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
150 	PERF_SAMPLE_RAW				= 1U << 10,
151 	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
152 	PERF_SAMPLE_REGS_USER			= 1U << 12,
153 	PERF_SAMPLE_STACK_USER			= 1U << 13,
154 	PERF_SAMPLE_WEIGHT			= 1U << 14,
155 	PERF_SAMPLE_DATA_SRC			= 1U << 15,
156 	PERF_SAMPLE_IDENTIFIER			= 1U << 16,
157 	PERF_SAMPLE_TRANSACTION			= 1U << 17,
158 	PERF_SAMPLE_REGS_INTR			= 1U << 18,
159 	PERF_SAMPLE_PHYS_ADDR			= 1U << 19,
160 	PERF_SAMPLE_AUX				= 1U << 20,
161 	PERF_SAMPLE_CGROUP			= 1U << 21,
162 	PERF_SAMPLE_DATA_PAGE_SIZE		= 1U << 22,
163 	PERF_SAMPLE_CODE_PAGE_SIZE		= 1U << 23,
164 	PERF_SAMPLE_WEIGHT_STRUCT		= 1U << 24,
165 
166 	PERF_SAMPLE_MAX = 1U << 25,		/* non-ABI */
167 };
168 
169 #define PERF_SAMPLE_WEIGHT_TYPE	(PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
170 /*
171  * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK is set
172  *
173  * If the user does not pass priv level information via branch_sample_type,
174  * the kernel uses the event's priv level. Branch and event priv levels do
175  * not have to match. Branch priv level is checked for permissions.
176  *
177  * The branch types can be combined; however, BRANCH_ANY covers all types
178  * of branches and therefore supersedes all the other types.
179  */
180 enum perf_branch_sample_type_shift {
181 	PERF_SAMPLE_BRANCH_USER_SHIFT		= 0, /* user branches */
182 	PERF_SAMPLE_BRANCH_KERNEL_SHIFT		= 1, /* kernel branches */
183 	PERF_SAMPLE_BRANCH_HV_SHIFT		= 2, /* hypervisor branches */
184 
185 	PERF_SAMPLE_BRANCH_ANY_SHIFT		= 3, /* any branch types */
186 	PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT	= 4, /* any call branch */
187 	PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT	= 5, /* any return branch */
188 	PERF_SAMPLE_BRANCH_IND_CALL_SHIFT	= 6, /* indirect calls */
189 	PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT	= 7, /* transaction aborts */
190 	PERF_SAMPLE_BRANCH_IN_TX_SHIFT		= 8, /* in transaction */
191 	PERF_SAMPLE_BRANCH_NO_TX_SHIFT		= 9, /* not in transaction */
192 	PERF_SAMPLE_BRANCH_COND_SHIFT		= 10, /* conditional branches */
193 
194 	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT	= 11, /* call/ret stack */
195 	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT	= 12, /* indirect jumps */
196 	PERF_SAMPLE_BRANCH_CALL_SHIFT		= 13, /* direct call */
197 
198 	PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT	= 14, /* no flags */
199 	PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT	= 15, /* no cycles */
200 
201 	PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT	= 16, /* save branch type */
202 
203 	PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT	= 17, /* save low level index of raw branch records */
204 
205 	PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT	= 18, /* save privilege mode */
206 
207 	PERF_SAMPLE_BRANCH_COUNTERS_SHIFT	= 19, /* save occurrences of events on a branch */
208 
209 	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
210 };
211 
212 enum perf_branch_sample_type {
213 	PERF_SAMPLE_BRANCH_USER		= 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
214 	PERF_SAMPLE_BRANCH_KERNEL	= 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
215 	PERF_SAMPLE_BRANCH_HV		= 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
216 
217 	PERF_SAMPLE_BRANCH_ANY		= 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
218 	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
219 	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
220 	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
221 	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
222 	PERF_SAMPLE_BRANCH_IN_TX	= 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
223 	PERF_SAMPLE_BRANCH_NO_TX	= 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
224 	PERF_SAMPLE_BRANCH_COND		= 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
225 
226 	PERF_SAMPLE_BRANCH_CALL_STACK	= 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
227 	PERF_SAMPLE_BRANCH_IND_JUMP	= 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
228 	PERF_SAMPLE_BRANCH_CALL		= 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
229 
230 	PERF_SAMPLE_BRANCH_NO_FLAGS	= 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
231 	PERF_SAMPLE_BRANCH_NO_CYCLES	= 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
232 
233 	PERF_SAMPLE_BRANCH_TYPE_SAVE	=
234 		1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
235 
236 	PERF_SAMPLE_BRANCH_HW_INDEX	= 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,
237 
238 	PERF_SAMPLE_BRANCH_PRIV_SAVE	= 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
239 
240 	PERF_SAMPLE_BRANCH_COUNTERS	= 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
241 
242 	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
243 };
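
/*
 * Example (illustrative): sampling user-space calls with the branch stack.
 * The branch stack is only recorded when PERF_SAMPLE_BRANCH_STACK is set in
 * attr.sample_type; the selection below is one possible choice, used here
 * as an assumption.
 *
 *   attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *   attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *                              PERF_SAMPLE_BRANCH_ANY_CALL;
 */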
244 
245 /*
246  * Common flow change classification
247  */
248 enum {
249 	PERF_BR_UNKNOWN		= 0,	/* unknown */
250 	PERF_BR_COND		= 1,	/* conditional */
251 	PERF_BR_UNCOND		= 2,	/* unconditional  */
252 	PERF_BR_IND		= 3,	/* indirect */
253 	PERF_BR_CALL		= 4,	/* function call */
254 	PERF_BR_IND_CALL	= 5,	/* indirect function call */
255 	PERF_BR_RET		= 6,	/* function return */
256 	PERF_BR_SYSCALL		= 7,	/* syscall */
257 	PERF_BR_SYSRET		= 8,	/* syscall return */
258 	PERF_BR_COND_CALL	= 9,	/* conditional function call */
259 	PERF_BR_COND_RET	= 10,	/* conditional function return */
260 	PERF_BR_ERET		= 11,	/* exception return */
261 	PERF_BR_IRQ		= 12,	/* irq */
262 	PERF_BR_SERROR		= 13,	/* system error */
263 	PERF_BR_NO_TX		= 14,	/* not in transaction */
264 	PERF_BR_EXTEND_ABI	= 15,	/* extend ABI */
265 	PERF_BR_MAX,
266 };
267 
268 /*
269  * Common branch speculation outcome classification
270  */
271 enum {
272 	PERF_BR_SPEC_NA			= 0,	/* Not available */
273 	PERF_BR_SPEC_WRONG_PATH		= 1,	/* Speculative but on wrong path */
274 	PERF_BR_NON_SPEC_CORRECT_PATH	= 2,	/* Non-speculative but on correct path */
275 	PERF_BR_SPEC_CORRECT_PATH	= 3,	/* Speculative and on correct path */
276 	PERF_BR_SPEC_MAX,
277 };
278 
279 enum {
280 	PERF_BR_NEW_FAULT_ALGN		= 0,    /* Alignment fault */
281 	PERF_BR_NEW_FAULT_DATA		= 1,    /* Data fault */
282 	PERF_BR_NEW_FAULT_INST		= 2,    /* Inst fault */
283 	PERF_BR_NEW_ARCH_1		= 3,    /* Architecture specific */
284 	PERF_BR_NEW_ARCH_2		= 4,    /* Architecture specific */
285 	PERF_BR_NEW_ARCH_3		= 5,    /* Architecture specific */
286 	PERF_BR_NEW_ARCH_4		= 6,    /* Architecture specific */
287 	PERF_BR_NEW_ARCH_5		= 7,    /* Architecture specific */
288 	PERF_BR_NEW_MAX,
289 };
290 
291 enum {
292 	PERF_BR_PRIV_UNKNOWN	= 0,
293 	PERF_BR_PRIV_USER	= 1,
294 	PERF_BR_PRIV_KERNEL	= 2,
295 	PERF_BR_PRIV_HV		= 3,
296 };
297 
298 #define PERF_BR_ARM64_FIQ		PERF_BR_NEW_ARCH_1
299 #define PERF_BR_ARM64_DEBUG_HALT	PERF_BR_NEW_ARCH_2
300 #define PERF_BR_ARM64_DEBUG_EXIT	PERF_BR_NEW_ARCH_3
301 #define PERF_BR_ARM64_DEBUG_INST	PERF_BR_NEW_ARCH_4
302 #define PERF_BR_ARM64_DEBUG_DATA	PERF_BR_NEW_ARCH_5
303 
304 #define PERF_SAMPLE_BRANCH_PLM_ALL \
305 	(PERF_SAMPLE_BRANCH_USER|\
306 	 PERF_SAMPLE_BRANCH_KERNEL|\
307 	 PERF_SAMPLE_BRANCH_HV)
308 
309 /*
310  * Values to determine ABI of the registers dump.
311  */
312 enum perf_sample_regs_abi {
313 	PERF_SAMPLE_REGS_ABI_NONE	= 0,
314 	PERF_SAMPLE_REGS_ABI_32		= 1,
315 	PERF_SAMPLE_REGS_ABI_64		= 2,
316 };
317 
318 /*
319  * Values for the memory transaction event qualifier, mostly for
320  * abort events. Multiple bits can be set.
321  */
322 enum {
323 	PERF_TXN_ELISION        = (1 << 0), /* From elision */
324 	PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
325 	PERF_TXN_SYNC           = (1 << 2), /* Instruction is related */
326 	PERF_TXN_ASYNC          = (1 << 3), /* Instruction not related */
327 	PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
328 	PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
329 	PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
330 	PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */
331 
332 	PERF_TXN_MAX	        = (1 << 8), /* non-ABI */
333 
334 	/* bits 32..63 are reserved for the abort code */
335 
336 	PERF_TXN_ABORT_MASK  = (0xffffffffULL << 32),
337 	PERF_TXN_ABORT_SHIFT = 32,
338 };
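
/*
 * Example (illustrative): decoding a PERF_SAMPLE_TRANSACTION value. "txn"
 * is a hypothetical variable holding the sampled qualifier.
 *
 *   int from_elision = !!(txn & PERF_TXN_ELISION);
 *   int conflict     = !!(txn & PERF_TXN_CONFLICT);
 *   __u32 abort_code = (txn & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT;
 */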
339 
340 /*
341  * The format of the data returned by read() on a perf event fd,
342  * as specified by attr.read_format:
343  *
344  * struct read_format {
345  *	{ u64		value;
346  *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
347  *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
348  *	  { u64		id;           } && PERF_FORMAT_ID
349  *	  { u64		lost;         } && PERF_FORMAT_LOST
350  *	} && !PERF_FORMAT_GROUP
351  *
352  *	{ u64		nr;
353  *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
354  *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
355  *	  { u64		value;
356  *	    { u64	id;           } && PERF_FORMAT_ID
357  *	    { u64	lost;         } && PERF_FORMAT_LOST
358  *	  }		cntr[nr];
359  *	} && PERF_FORMAT_GROUP
360  * };
361  */
362 enum perf_event_read_format {
363 	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
364 	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
365 	PERF_FORMAT_ID				= 1U << 2,
366 	PERF_FORMAT_GROUP			= 1U << 3,
367 	PERF_FORMAT_LOST			= 1U << 4,
368 
369 	PERF_FORMAT_MAX = 1U << 5,		/* non-ABI */
370 };
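
/*
 * Example (illustrative sketch): reading a group leader opened with
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID. "MAX_COUNTERS",
 * "group_fd" and use() are assumptions for illustration; the time fields
 * are not requested, so they do not appear in the layout.
 *
 *   struct {
 *	__u64 nr;
 *	struct { __u64 value, id; } cntr[MAX_COUNTERS];
 *   } buf;
 *
 *   read(group_fd, &buf, sizeof(buf));
 *   for (i = 0; i < buf.nr; i++)
 *	use(buf.cntr[i].id, buf.cntr[i].value);
 */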
371 
372 #define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
373 #define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
374 #define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
375 #define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
376 					/* add: sample_stack_user */
377 #define PERF_ATTR_SIZE_VER4	104	/* add: sample_regs_intr */
378 #define PERF_ATTR_SIZE_VER5	112	/* add: aux_watermark */
379 #define PERF_ATTR_SIZE_VER6	120	/* add: aux_sample_size */
380 #define PERF_ATTR_SIZE_VER7	128	/* add: sig_data */
381 #define PERF_ATTR_SIZE_VER8	136	/* add: config3 */
382 
383 /*
384  * Hardware event_id to monitor via a performance monitoring event:
385  *
386  * @sample_max_stack: Max number of frame pointers in a callchain,
387  *		      should be < /proc/sys/kernel/perf_event_max_stack
388  */
389 struct perf_event_attr {
390 
391 	/*
392 	 * Major type: hardware/software/tracepoint/etc.
393 	 */
394 	__u32			type;
395 
396 	/*
397 	 * Size of the attr structure, for fwd/bwd compat.
398 	 */
399 	__u32			size;
400 
401 	/*
402 	 * Type specific configuration information.
403 	 */
404 	__u64			config;
405 
406 	union {
407 		__u64		sample_period;
408 		__u64		sample_freq;
409 	};
410 
411 	__u64			sample_type;
412 	__u64			read_format;
413 
414 	__u64			disabled       :  1, /* off by default        */
415 				inherit	       :  1, /* children inherit it   */
416 				pinned	       :  1, /* must always be on PMU */
417 				exclusive      :  1, /* only group on PMU     */
418 				exclude_user   :  1, /* don't count user      */
419 				exclude_kernel :  1, /* ditto kernel          */
420 				exclude_hv     :  1, /* ditto hypervisor      */
421 				exclude_idle   :  1, /* don't count when idle */
422 				mmap           :  1, /* include mmap data     */
423 				comm	       :  1, /* include comm data     */
424 				freq           :  1, /* use freq, not period  */
425 				inherit_stat   :  1, /* per task counts       */
426 				enable_on_exec :  1, /* next exec enables     */
427 				task           :  1, /* trace fork/exit       */
428 				watermark      :  1, /* wakeup_watermark      */
429 				/*
430 				 * precise_ip:
431 				 *
432 				 *  0 - SAMPLE_IP can have arbitrary skid
433 				 *  1 - SAMPLE_IP must have constant skid
434 				 *  2 - SAMPLE_IP requested to have 0 skid
435 				 *  3 - SAMPLE_IP must have 0 skid
436 				 *
437 				 *  See also PERF_RECORD_MISC_EXACT_IP
438 				 */
439 				precise_ip     :  2, /* skid constraint       */
440 				mmap_data      :  1, /* non-exec mmap data    */
441 				sample_id_all  :  1, /* sample_type all events */
442 
443 				exclude_host   :  1, /* don't count in host   */
444 				exclude_guest  :  1, /* don't count in guest  */
445 
446 				exclude_callchain_kernel : 1, /* exclude kernel callchains */
447 				exclude_callchain_user   : 1, /* exclude user callchains */
448 				mmap2          :  1, /* include mmap with inode data     */
449 				comm_exec      :  1, /* flag comm events that are due to an exec */
450 				use_clockid    :  1, /* use @clockid for time fields */
451 				context_switch :  1, /* context switch data */
452 				write_backward :  1, /* Write ring buffer from end to beginning */
453 				namespaces     :  1, /* include namespaces data */
454 				ksymbol        :  1, /* include ksymbol events */
455 				bpf_event      :  1, /* include bpf events */
456 				aux_output     :  1, /* generate AUX records instead of events */
457 				cgroup         :  1, /* include cgroup events */
458 				text_poke      :  1, /* include text poke events */
459 				build_id       :  1, /* use build id in mmap2 events */
460 				inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
461 				remove_on_exec :  1, /* event is removed from task on exec */
462 				sigtrap        :  1, /* send synchronous SIGTRAP on event */
463 				__reserved_1   : 26;
464 
465 	union {
466 		__u32		wakeup_events;	  /* wakeup every n events */
467 		__u32		wakeup_watermark; /* bytes before wakeup   */
468 	};
469 
470 	__u32			bp_type;
471 	union {
472 		__u64		bp_addr;
473 		__u64		kprobe_func; /* for perf_kprobe */
474 		__u64		uprobe_path; /* for perf_uprobe */
475 		__u64		config1; /* extension of config */
476 	};
477 	union {
478 		__u64		bp_len;
479 		__u64		kprobe_addr; /* when kprobe_func == NULL */
480 		__u64		probe_offset; /* for perf_[k,u]probe */
481 		__u64		config2; /* extension of config1 */
482 	};
483 	__u64	branch_sample_type; /* enum perf_branch_sample_type */
484 
485 	/*
486 	 * Defines set of user regs to dump on samples.
487 	 * See asm/perf_regs.h for details.
488 	 */
489 	__u64	sample_regs_user;
490 
491 	/*
492 	 * Defines size of the user stack to dump on samples.
493 	 */
494 	__u32	sample_stack_user;
495 
496 	__s32	clockid;
497 	/*
498 	 * Defines set of regs to dump for each sample
499 	 * state captured on:
500 	 *  - precise = 0: PMU interrupt
501 	 *  - precise > 0: sampled instruction
502 	 *
503 	 * See asm/perf_regs.h for details.
504 	 */
505 	__u64	sample_regs_intr;
506 
507 	/*
508 	 * Wakeup watermark for AUX area
509 	 */
510 	__u32	aux_watermark;
511 	__u16	sample_max_stack;
512 	__u16	__reserved_2;
513 	__u32	aux_sample_size;
514 
515 	union {
516 		__u32	aux_action;
517 		struct {
518 			__u32	aux_start_paused :  1, /* start AUX area tracing paused */
519 				aux_pause        :  1, /* on overflow, pause AUX area tracing */
520 				aux_resume       :  1, /* on overflow, resume AUX area tracing */
521 				__reserved_3     : 29;
522 		};
523 	};
524 
525 	/*
526 	 * User provided data if sigtrap=1, passed back to user via
527 	 * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
528 	 * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
529 	 * truncated accordingly on 32 bit architectures.
530 	 */
531 	__u64	sig_data;
532 
533 	__u64	config3; /* extension of config2 */
534 };
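
/*
 * Example (illustrative, not a complete recipe): a sampling configuration
 * that records IP, TID, timestamp and callchain every 100000 user-space
 * CPU cycles. The period value is an arbitrary assumption.
 *
 *   struct perf_event_attr attr = { 0 };
 *
 *   attr.type           = PERF_TYPE_HARDWARE;
 *   attr.size           = sizeof(attr);
 *   attr.config         = PERF_COUNT_HW_CPU_CYCLES;
 *   attr.sample_period  = 100000;
 *   attr.sample_type    = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *                         PERF_SAMPLE_TIME | PERF_SAMPLE_CALLCHAIN;
 *   attr.disabled       = 1;
 *   attr.exclude_kernel = 1;
 *   attr.exclude_hv     = 1;
 */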
535 
536 /*
537  * Structure used by the PERF_EVENT_IOC_QUERY_BPF command below
538  * to query BPF programs attached to the same perf tracepoint
539  * as the given perf event.
540  */
541 struct perf_event_query_bpf {
542 	/*
543 	 * Length of the ids[] array below
544 	 */
545 	__u32	ids_len;
546 	/*
547 	 * Set by the kernel to indicate the number of
548 	 * available programs
549 	 */
550 	__u32	prog_cnt;
551 	/*
552 	 * User provided buffer to store program ids
553 	 */
554 	__u32	ids[];
555 };
556 
557 /*
558  * Ioctls that can be done on a perf event fd:
559  */
560 #define PERF_EVENT_IOC_ENABLE			_IO ('$', 0)
561 #define PERF_EVENT_IOC_DISABLE			_IO ('$', 1)
562 #define PERF_EVENT_IOC_REFRESH			_IO ('$', 2)
563 #define PERF_EVENT_IOC_RESET			_IO ('$', 3)
564 #define PERF_EVENT_IOC_PERIOD			_IOW('$', 4, __u64)
565 #define PERF_EVENT_IOC_SET_OUTPUT		_IO ('$', 5)
566 #define PERF_EVENT_IOC_SET_FILTER		_IOW('$', 6, char *)
567 #define PERF_EVENT_IOC_ID			_IOR('$', 7, __u64 *)
568 #define PERF_EVENT_IOC_SET_BPF			_IOW('$', 8, __u32)
569 #define PERF_EVENT_IOC_PAUSE_OUTPUT		_IOW('$', 9, __u32)
570 #define PERF_EVENT_IOC_QUERY_BPF		_IOWR('$', 10, struct perf_event_query_bpf *)
571 #define PERF_EVENT_IOC_MODIFY_ATTRIBUTES	_IOW('$', 11, struct perf_event_attr *)
572 
573 enum perf_event_ioc_flags {
574 	PERF_IOC_FLAG_GROUP		= 1U << 0,
575 };
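
/*
 * Example (illustrative): for PERF_EVENT_IOC_ENABLE, DISABLE and RESET the
 * ioctl argument is interpreted as a flags word; passing
 * PERF_IOC_FLAG_GROUP applies the operation to all events in the group led
 * by the given fd. "group_fd" is a hypothetical descriptor.
 *
 *   ioctl(group_fd, PERF_EVENT_IOC_RESET,  PERF_IOC_FLAG_GROUP);
 *   ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */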
576 
577 /*
578  * Structure of the page that can be mapped via mmap
579  */
580 struct perf_event_mmap_page {
581 	__u32	version;		/* version number of this structure */
582 	__u32	compat_version;		/* lowest version this is compat with */
583 
584 	/*
585 	 * Bits needed to read the hw events in user-space.
586 	 *
587 	 *   u32 seq, time_mult, time_shift, index, width;
588 	 *   u64 count, enabled, running;
589 	 *   u64 cyc, time_offset;
590 	 *   s64 pmc = 0;
591 	 *
592 	 *   do {
593 	 *     seq = pc->lock;
594 	 *     barrier()
595 	 *
596 	 *     enabled = pc->time_enabled;
597 	 *     running = pc->time_running;
598 	 *
599 	 *     if (pc->cap_usr_time && enabled != running) {
600 	 *       cyc = rdtsc();
601 	 *       time_offset = pc->time_offset;
602 	 *       time_mult   = pc->time_mult;
603 	 *       time_shift  = pc->time_shift;
604 	 *     }
605 	 *
606 	 *     index = pc->index;
607 	 *     count = pc->offset;
608 	 *     if (pc->cap_user_rdpmc && index) {
609 	 *       width = pc->pmc_width;
610 	 *       pmc = rdpmc(index - 1);
611 	 *     }
612 	 *
613 	 *     barrier();
614 	 *   } while (pc->lock != seq);
615 	 *
616 	 * NOTE: for obvious reasons this only works on self-monitoring
617 	 *       processes.
618 	 */
619 	__u32	lock;			/* seqlock for synchronization */
620 	__u32	index;			/* hardware event identifier */
621 	__s64	offset;			/* add to hardware event value */
622 	__u64	time_enabled;		/* time event active */
623 	__u64	time_running;		/* time event on cpu */
624 	union {
625 		__u64	capabilities;
626 		struct {
627 			__u64	cap_bit0		: 1, /* Always 0, deprecated, see commit 860f085b74e9 */
628 				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */
629 
630 				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
631 				cap_user_time		: 1, /* The time_{shift,mult,offset} fields are used */
632 				cap_user_time_zero	: 1, /* The time_zero field is used */
633 				cap_user_time_short	: 1, /* the time_{cycle,mask} fields are used */
634 				cap_____res		: 58;
635 		};
636 	};
637 
638 	/*
639 	 * If cap_user_rdpmc this field provides the bit-width of the value
640 	 * read using the rdpmc() or equivalent instruction. This can be used
641 	 * to sign extend the result like:
642 	 *
643 	 *   pmc <<= 64 - width;
644 	 *   pmc >>= 64 - width; // signed shift right
645 	 *   count += pmc;
646 	 */
647 	__u16	pmc_width;
648 
649 	/*
650 	 * If cap_usr_time the below fields can be used to compute the time
651 	 * delta since time_enabled (in ns) using rdtsc or similar.
652 	 *
653 	 *   u64 quot, rem;
654 	 *   u64 delta;
655 	 *
656 	 *   quot = (cyc >> time_shift);
657 	 *   rem = cyc & (((u64)1 << time_shift) - 1);
658 	 *   delta = time_offset + quot * time_mult +
659 	 *              ((rem * time_mult) >> time_shift);
660 	 *
661 	 * Where time_offset,time_mult,time_shift and cyc are read in the
662 	 * seqcount loop described above. This delta can then be added to
663 	 * enabled and possibly running (if index), improving the scaling:
664 	 *
665 	 *   enabled += delta;
666 	 *   if (index)
667 	 *     running += delta;
668 	 *
669 	 *   quot = count / running;
670 	 *   rem  = count % running;
671 	 *   count = quot * enabled + (rem * enabled) / running;
672 	 */
673 	__u16	time_shift;
674 	__u32	time_mult;
675 	__u64	time_offset;
676 	/*
677 	 * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
678 	 * from sample timestamps.
679 	 *
680 	 *   time = timestamp - time_zero;
681 	 *   quot = time / time_mult;
682 	 *   rem  = time % time_mult;
683 	 *   cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
684 	 *
685 	 * And vice versa:
686 	 *
687 	 *   quot = cyc >> time_shift;
688 	 *   rem  = cyc & (((u64)1 << time_shift) - 1);
689 	 *   timestamp = time_zero + quot * time_mult +
690 	 *               ((rem * time_mult) >> time_shift);
691 	 */
692 	__u64	time_zero;
693 
694 	__u32	size;			/* Header size up to __reserved[] fields. */
695 	__u32	__reserved_1;
696 
697 	/*
698 	 * If cap_usr_time_short, the hardware clock is less than 64bit wide
699 	 * and we must compute the 'cyc' value, as used by cap_usr_time, as:
700 	 *
701 	 *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
702 	 *
703 	 * NOTE: this form is explicitly chosen such that cap_usr_time_short
704 	 *       is a correction on top of cap_usr_time, and code that doesn't
705 	 *       know about cap_usr_time_short still works under the assumption
706 	 *       the counter doesn't wrap.
707 	 */
708 	__u64	time_cycles;
709 	__u64	time_mask;
710 
711 		/*
712 		 * Hole for extension of the self monitor capabilities
713 		 */
714 
715 	__u8	__reserved[116*8];	/* align to 1k. */
716 
717 	/*
718 	 * Control data for the mmap() data buffer.
719 	 *
720 	 * User-space reading the @data_head value should issue an smp_rmb(),
721 	 * after reading this value.
722 	 *
723 	 * When the mapping is PROT_WRITE the @data_tail value should be
724 	 * written by userspace to reflect the last read data, after issuing
725 	 * an smp_mb() to separate the data read from the ->data_tail store.
726 	 * In this case the kernel will not overwrite unread data.
727 	 *
728 	 * See perf_output_put_handle() for the data ordering.
729 	 *
730 	 * data_{offset,size} indicate the location and size of the perf record
731 	 * buffer within the mmapped area.
732 	 */
733 	__u64   data_head;		/* head in the data section */
734 	__u64	data_tail;		/* user-space written tail */
735 	__u64	data_offset;		/* where the buffer starts */
736 	__u64	data_size;		/* data buffer size */
737 
738 	/*
739 	 * AUX area is defined by aux_{offset,size} fields that should be set
740 	 * by the userspace, so that
741 	 *
742 	 *   aux_offset >= data_offset + data_size
743 	 *
744 	 * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
745 	 *
746 	 * Ring buffer pointers aux_{head,tail} have the same semantics as
747 	 * data_{head,tail} and same ordering rules apply.
748 	 */
749 	__u64	aux_head;
750 	__u64	aux_tail;
751 	__u64	aux_offset;
752 	__u64	aux_size;
753 };
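
/*
 * Example (illustrative sketch): draining the data buffer of a
 * PROT_READ|PROT_WRITE mapping. Assumptions: "pc" points at the mapped
 * struct perf_event_mmap_page, smp_rmb()/smp_mb() stand for the
 * architecture-appropriate barriers mentioned above, consume() is a
 * hypothetical record handler, and records wrapping around the end of the
 * buffer are not handled here.
 *
 *   __u64 head = pc->data_head;
 *   smp_rmb();
 *
 *   while (pc->data_tail != head) {
 *	struct perf_event_header *hdr;
 *
 *	hdr = (void *)((char *)pc + pc->data_offset +
 *		       (pc->data_tail & (pc->data_size - 1)));
 *	consume(hdr);
 *	smp_mb();
 *	pc->data_tail += hdr->size;
 *   }
 */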
754 
755 /*
756  * The current state of perf_event_header::misc bits usage:
757  * ('|' used bit, '-' unused bit)
758  *
759  *  012         CDEF
760  *  |||---------||||
761  *
762  *  Where:
763  *    0-2     CPUMODE_MASK
764  *
765  *    C       PROC_MAP_PARSE_TIMEOUT
766  *    D       MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
767  *    E       MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
768  *    F       (reserved)
769  */
770 
771 #define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
772 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
773 #define PERF_RECORD_MISC_KERNEL			(1 << 0)
774 #define PERF_RECORD_MISC_USER			(2 << 0)
775 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
776 #define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
777 #define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
778 
779 /*
780  * Indicates that /proc/PID/maps parsing was truncated by a timeout.
781  */
782 #define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
783 /*
784  * Following PERF_RECORD_MISC_* are used on different
785  * events, so can reuse the same bit position:
786  *
787  *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
788  *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
789  *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
790  *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
791  */
792 #define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
793 #define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
794 #define PERF_RECORD_MISC_FORK_EXEC		(1 << 13)
795 #define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
796 /*
797  * These PERF_RECORD_MISC_* flags below are safely reused
798  * for the following events:
799  *
800  *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
801  *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
802  *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
803  *
804  *
805  * PERF_RECORD_MISC_EXACT_IP:
806  *   Indicates that the content of PERF_SAMPLE_IP points to
807  *   the actual instruction that triggered the event. See also
808  *   perf_event_attr::precise_ip.
809  *
810  * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
811  *   Indicates that thread was preempted in TASK_RUNNING state.
812  *
813  * PERF_RECORD_MISC_MMAP_BUILD_ID:
814  *   Indicates that mmap2 event carries build id data.
815  */
816 #define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
817 #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
818 #define PERF_RECORD_MISC_MMAP_BUILD_ID		(1 << 14)
819 /*
820  * Reserve the last bit to indicate some extended misc field
821  */
822 #define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
823 
824 struct perf_event_header {
825 	__u32	type;
826 	__u16	misc;
827 	__u16	size;
828 };
829 
830 struct perf_ns_link_info {
831 	__u64	dev;
832 	__u64	ino;
833 };
834 
835 enum {
836 	NET_NS_INDEX		= 0,
837 	UTS_NS_INDEX		= 1,
838 	IPC_NS_INDEX		= 2,
839 	PID_NS_INDEX		= 3,
840 	USER_NS_INDEX		= 4,
841 	MNT_NS_INDEX		= 5,
842 	CGROUP_NS_INDEX		= 6,
843 
844 	NR_NAMESPACES,		/* number of available namespaces */
845 };
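
/*
 * Example (illustrative): in PERF_RECORD_NAMESPACES (see below) the
 * dev/inode pairs are indexed by the constants above. "link_info" is a
 * hypothetical pointer to the record's nr_namespaces entries.
 *
 *   struct perf_ns_link_info *link_info = ...;
 *   __u64 pid_ns_inode = link_info[PID_NS_INDEX].ino;
 */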
846 
847 enum perf_event_type {
848 
849 	/*
850 	 * If perf_event_attr.sample_id_all is set then all event types will
851 	 * have the sample_type selected fields related to where/when
852 	 * IDENTIFIER) described in PERF_RECORD_SAMPLE below. They will be stashed
853 	 * just after the perf_event_header and the fields already present for
854 	 * the existing record type, i.e. at the end of the payload. That way a newer
855 	 * the existing fields, i.e. at the end of the payload. That way a newer
856 	 * perf.data file will be supported by older perf tools, with these new
857 	 * optional fields being ignored.
858 	 *
859 	 * struct sample_id {
860 	 * 	{ u32			pid, tid; } && PERF_SAMPLE_TID
861 	 * 	{ u64			time;     } && PERF_SAMPLE_TIME
862 	 * 	{ u64			id;       } && PERF_SAMPLE_ID
863 	 * 	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
864 	 * 	{ u32			cpu, res; } && PERF_SAMPLE_CPU
865 	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
866 	 * } && perf_event_attr::sample_id_all
867 	 *
868 	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.  The
869 	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
870 	 * relative to header.size.
871 	 */
872 
873 	/*
874 	 * The MMAP events record the PROT_EXEC mappings so that we can
875 	 * correlate userspace IPs to code. They have the following structure:
876 	 *
877 	 * struct {
878 	 *	struct perf_event_header	header;
879 	 *
880 	 *	u32				pid, tid;
881 	 *	u64				addr;
882 	 *	u64				len;
883 	 *	u64				pgoff;
884 	 *	char				filename[];
885 	 * 	struct sample_id		sample_id;
886 	 * };
887 	 */
888 	PERF_RECORD_MMAP			= 1,
889 
890 	/*
891 	 * struct {
892 	 *	struct perf_event_header	header;
893 	 *	u64				id;
894 	 *	u64				lost;
895 	 * 	struct sample_id		sample_id;
896 	 * };
897 	 */
898 	PERF_RECORD_LOST			= 2,
899 
900 	/*
901 	 * struct {
902 	 *	struct perf_event_header	header;
903 	 *
904 	 *	u32				pid, tid;
905 	 *	char				comm[];
906 	 * 	struct sample_id		sample_id;
907 	 * };
908 	 */
909 	PERF_RECORD_COMM			= 3,
910 
911 	/*
912 	 * struct {
913 	 *	struct perf_event_header	header;
914 	 *	u32				pid, ppid;
915 	 *	u32				tid, ptid;
916 	 *	u64				time;
917 	 * 	struct sample_id		sample_id;
918 	 * };
919 	 */
920 	PERF_RECORD_EXIT			= 4,
921 
922 	/*
923 	 * struct {
924 	 *	struct perf_event_header	header;
925 	 *	u64				time;
926 	 *	u64				id;
927 	 *	u64				stream_id;
928 	 * 	struct sample_id		sample_id;
929 	 * };
930 	 */
931 	PERF_RECORD_THROTTLE			= 5,
932 	PERF_RECORD_UNTHROTTLE			= 6,
933 
934 	/*
935 	 * struct {
936 	 *	struct perf_event_header	header;
937 	 *	u32				pid, ppid;
938 	 *	u32				tid, ptid;
939 	 *	u64				time;
940 	 * 	struct sample_id		sample_id;
941 	 * };
942 	 */
943 	PERF_RECORD_FORK			= 7,
944 
945 	/*
946 	 * struct {
947 	 *	struct perf_event_header	header;
948 	 *	u32				pid, tid;
949 	 *
950 	 *	struct read_format		values;
951 	 * 	struct sample_id		sample_id;
952 	 * };
953 	 */
954 	PERF_RECORD_READ			= 8,
955 
956 	/*
957 	 * struct {
958 	 *	struct perf_event_header	header;
959 	 *
960 	 *	#
961 	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
962 	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
963 	 *	# is fixed relative to header.
964 	 *	#
965 	 *
966 	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
967 	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
968 	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
969 	 *	{ u64			time;     } && PERF_SAMPLE_TIME
970 	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
971 	 *	{ u64			id;	  } && PERF_SAMPLE_ID
972 	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
973 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
974 	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
975 	 *
976 	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
977 	 *
978 	 *	{ u64			nr,
979 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
980 	 *
981 	 *	#
982 	 *	# The RAW record below is opaque data wrt the ABI
983 	 *	#
984 	 *	# That is, the ABI doesn't make any promises wrt to
985 	 *	# the stability of its content, it may vary depending
986 	 *	# on event, hardware, kernel version and phase of
987 	 *	# the moon.
988 	 *	#
989 	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
990 	 *	#
991 	 *
992 	 *	{ u32			size;
993 	 *	  char                  data[size];}&& PERF_SAMPLE_RAW
994 	 *
995 	 *	{ u64                   nr;
996 	 *	  { u64	hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
997 	 *        { u64 from, to, flags } lbr[nr];
998 	 *        #
999 	 *        # The format of the counters is decided by the
1000 	 *        # "branch_counter_nr" and "branch_counter_width",
1001 	 *        # which are defined in the ABI.
1002 	 *        #
1003 	 *        { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
1004 	 *      } && PERF_SAMPLE_BRANCH_STACK
1005 	 *
1006 	 * 	{ u64			abi; # enum perf_sample_regs_abi
1007 	 * 	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
1008 	 *
1009 	 * 	{ u64			size;
1010 	 * 	  char			data[size];
1011 	 * 	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
1012 	 *
1013 	 *	{ union perf_sample_weight
1014 	 *	 {
1015 	 *		u64		full; && PERF_SAMPLE_WEIGHT
1016 	 *	#if defined(__LITTLE_ENDIAN_BITFIELD)
1017 	 *		struct {
1018 	 *			u32	var1_dw;
1019 	 *			u16	var2_w;
1020 	 *			u16	var3_w;
1021 	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
1022 	 *	#elif defined(__BIG_ENDIAN_BITFIELD)
1023 	 *		struct {
1024 	 *			u16	var3_w;
1025 	 *			u16	var2_w;
1026 	 *			u32	var1_dw;
1027 	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
1028 	 *	#endif
1029 	 *	 }
1030 	 *	}
1031 	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
1032 	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
1033 	 *	{ u64			abi; # enum perf_sample_regs_abi
1034 	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
1035 	 *	{ u64			phys_addr;} && PERF_SAMPLE_PHYS_ADDR
1036 	 *	{ u64			size;
1037 	 *	  char			data[size]; } && PERF_SAMPLE_AUX
1038 	 *	{ u64			data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
1039 	 *	{ u64			code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
1040 	 * };
1041 	 */
1042 	PERF_RECORD_SAMPLE			= 9,
1043 
1044 	/*
1045 	 * The MMAP2 records are an augmented version of MMAP; they add
1046 	 * maj, min and ino numbers to be used to uniquely identify each mapping.
1047 	 *
1048 	 * struct {
1049 	 *	struct perf_event_header	header;
1050 	 *
1051 	 *	u32				pid, tid;
1052 	 *	u64				addr;
1053 	 *	u64				len;
1054 	 *	u64				pgoff;
1055 	 *	union {
1056 	 *		struct {
1057 	 *			u32		maj;
1058 	 *			u32		min;
1059 	 *			u64		ino;
1060 	 *			u64		ino_generation;
1061 	 *		};
1062 	 *		struct {
1063 	 *			u8		build_id_size;
1064 	 *			u8		__reserved_1;
1065 	 *			u16		__reserved_2;
1066 	 *			u8		build_id[20];
1067 	 *		};
1068 	 *	};
1069 	 *	u32				prot, flags;
1070 	 *	char				filename[];
1071 	 * 	struct sample_id		sample_id;
1072 	 * };
1073 	 */
1074 	PERF_RECORD_MMAP2			= 10,
1075 
1076 	/*
1077 	 * Records that new data landed in the AUX area buffer.
1078 	 *
1079 	 * struct {
1080 	 * 	struct perf_event_header	header;
1081 	 *
1082 	 * 	u64				aux_offset;
1083 	 * 	u64				aux_size;
1084 	 *	u64				flags;
1085 	 * 	struct sample_id		sample_id;
1086 	 * };
1087 	 */
1088 	PERF_RECORD_AUX				= 11,
1089 
1090 	/*
1091 	 * Indicates that instruction trace has started
1092 	 *
1093 	 * struct {
1094 	 *	struct perf_event_header	header;
1095 	 *	u32				pid;
1096 	 *	u32				tid;
1097 	 *	struct sample_id		sample_id;
1098 	 * };
1099 	 */
1100 	PERF_RECORD_ITRACE_START		= 12,
1101 
1102 	/*
1103 	 * Records the number of dropped/lost samples.
1104 	 *
1105 	 * struct {
1106 	 *	struct perf_event_header	header;
1107 	 *
1108 	 *	u64				lost;
1109 	 *	struct sample_id		sample_id;
1110 	 * };
1111 	 */
1112 	PERF_RECORD_LOST_SAMPLES		= 13,
1113 
1114 	/*
1115 	 * Records a context switch in or out (flagged by
1116 	 * PERF_RECORD_MISC_SWITCH_OUT). See also
1117 	 * PERF_RECORD_SWITCH_CPU_WIDE.
1118 	 *
1119 	 * struct {
1120 	 *	struct perf_event_header	header;
1121 	 *	struct sample_id		sample_id;
1122 	 * };
1123 	 */
1124 	PERF_RECORD_SWITCH			= 14,
1125 
1126 	/*
1127 	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
1128 	 * next_prev_tid that are the next (switching out) or previous
1129 	 * (switching in) pid/tid.
1130 	 *
1131 	 * struct {
1132 	 *	struct perf_event_header	header;
1133 	 *	u32				next_prev_pid;
1134 	 *	u32				next_prev_tid;
1135 	 *	struct sample_id		sample_id;
1136 	 * };
1137 	 */
1138 	PERF_RECORD_SWITCH_CPU_WIDE		= 15,
1139 
1140 	/*
1141 	 * struct {
1142 	 *	struct perf_event_header	header;
1143 	 *	u32				pid;
1144 	 *	u32				tid;
1145 	 *	u64				nr_namespaces;
1146 	 *	{ u64				dev, inode; } [nr_namespaces];
1147 	 *	struct sample_id		sample_id;
1148 	 * };
1149 	 */
1150 	PERF_RECORD_NAMESPACES			= 16,
1151 
1152 	/*
1153 	 * Record ksymbol register/unregister events:
1154 	 *
1155 	 * struct {
1156 	 *	struct perf_event_header	header;
1157 	 *	u64				addr;
1158 	 *	u32				len;
1159 	 *	u16				ksym_type;
1160 	 *	u16				flags;
1161 	 *	char				name[];
1162 	 *	struct sample_id		sample_id;
1163 	 * };
1164 	 */
1165 	PERF_RECORD_KSYMBOL			= 17,
1166 
1167 	/*
1168 	 * Record bpf events:
1169 	 *  enum perf_bpf_event_type {
1170 	 *	PERF_BPF_EVENT_UNKNOWN		= 0,
1171 	 *	PERF_BPF_EVENT_PROG_LOAD	= 1,
1172 	 *	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
1173 	 *  };
1174 	 *
1175 	 * struct {
1176 	 *	struct perf_event_header	header;
1177 	 *	u16				type;
1178 	 *	u16				flags;
1179 	 *	u32				id;
1180 	 *	u8				tag[BPF_TAG_SIZE];
1181 	 *	struct sample_id		sample_id;
1182 	 * };
1183 	 */
1184 	PERF_RECORD_BPF_EVENT			= 18,
1185 
1186 	/*
1187 	 * struct {
1188 	 *	struct perf_event_header	header;
1189 	 *	u64				id;
1190 	 *	char				path[];
1191 	 *	struct sample_id		sample_id;
1192 	 * };
1193 	 */
1194 	PERF_RECORD_CGROUP			= 19,
1195 
1196 	/*
1197 	 * Records changes to kernel text, i.e. self-modifying code. 'old_len' is
1198 	 * the number of old bytes, 'new_len' is the number of new bytes. Either
1199 	 * 'old_len' or 'new_len' may be zero to indicate, for example, the
1200 	 * addition or removal of a trampoline. 'bytes' contains the old bytes
1201 	 * followed immediately by the new bytes.
1202 	 *
1203 	 * struct {
1204 	 *	struct perf_event_header	header;
1205 	 *	u64				addr;
1206 	 *	u16				old_len;
1207 	 *	u16				new_len;
1208 	 *	u8				bytes[];
1209 	 *	struct sample_id		sample_id;
1210 	 * };
1211 	 */
1212 	PERF_RECORD_TEXT_POKE			= 20,
1213 
1214 	/*
1215 	 * Data written to the AUX area by hardware due to aux_output may need
1216 	 * to be matched to the event by an architecture-specific hardware ID.
1217 	 * This records the hardware ID, but requires sample_id to provide the
1218 	 * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
1219 	 * records from multiple events.
1220 	 *
1221 	 * struct {
1222 	 *	struct perf_event_header	header;
1223 	 *	u64				hw_id;
1224 	 *	struct sample_id		sample_id;
1225 	 * };
1226 	 */
1227 	PERF_RECORD_AUX_OUTPUT_HW_ID		= 21,
1228 
1229 	PERF_RECORD_MAX,			/* non-ABI */
1230 };
1231 
1232 enum perf_record_ksymbol_type {
1233 	PERF_RECORD_KSYMBOL_TYPE_UNKNOWN	= 0,
1234 	PERF_RECORD_KSYMBOL_TYPE_BPF		= 1,
1235 	/*
1236 	 * Out of line code such as kprobe-replaced instructions or optimized
1237 	 * kprobes or ftrace trampolines.
1238 	 */
1239 	PERF_RECORD_KSYMBOL_TYPE_OOL		= 2,
1240 	PERF_RECORD_KSYMBOL_TYPE_MAX		/* non-ABI */
1241 };
1242 
1243 #define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER	(1 << 0)
1244 
1245 enum perf_bpf_event_type {
1246 	PERF_BPF_EVENT_UNKNOWN		= 0,
1247 	PERF_BPF_EVENT_PROG_LOAD	= 1,
1248 	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
1249 	PERF_BPF_EVENT_MAX,		/* non-ABI */
1250 };
1251 
1252 #define PERF_MAX_STACK_DEPTH		127
1253 #define PERF_MAX_CONTEXTS_PER_STACK	  8
1254 
1255 enum perf_callchain_context {
1256 	PERF_CONTEXT_HV			= (__u64)-32,
1257 	PERF_CONTEXT_KERNEL		= (__u64)-128,
1258 	PERF_CONTEXT_USER		= (__u64)-512,
1259 
1260 	PERF_CONTEXT_GUEST		= (__u64)-2048,
1261 	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
1262 	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,
1263 
1264 	PERF_CONTEXT_MAX		= (__u64)-4095,
1265 };
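
/*
 * Example (illustrative): in a PERF_SAMPLE_CALLCHAIN array, entries at or
 * above PERF_CONTEXT_MAX (as unsigned values) are context markers rather
 * than return addresses. "nr", "ips" and record_frame() are assumptions
 * for illustration.
 *
 *   __u64 ctx = PERF_CONTEXT_MAX;
 *
 *   for (i = 0; i < nr; i++) {
 *	if (ips[i] >= PERF_CONTEXT_MAX)
 *		ctx = ips[i];
 *	else
 *		record_frame(ctx, ips[i]);
 *   }
 */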
1266 
1267 /**
1268  * PERF_RECORD_AUX::flags bits
1269  */
1270 #define PERF_AUX_FLAG_TRUNCATED			0x01	/* record was truncated to fit */
1271 #define PERF_AUX_FLAG_OVERWRITE			0x02	/* snapshot from overwrite mode */
1272 #define PERF_AUX_FLAG_PARTIAL			0x04	/* record contains gaps */
1273 #define PERF_AUX_FLAG_COLLISION			0x08	/* sample collided with another */
1274 #define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK	0xff00	/* PMU specific trace format type */
1275 
1276 /* CoreSight PMU AUX buffer formats */
1277 #define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT	0x0000 /* Default for backward compatibility */
1278 #define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW		0x0100 /* Raw format of the source */
1279 
1280 #define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
1281 #define PERF_FLAG_FD_OUTPUT		(1UL << 1)
1282 #define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
1283 #define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */
1284 
1285 #if defined(__LITTLE_ENDIAN_BITFIELD)
1286 union perf_mem_data_src {
1287 	__u64 val;
1288 	struct {
1289 		__u64   mem_op:5,	/* type of opcode */
1290 			mem_lvl:14,	/* memory hierarchy level */
1291 			mem_snoop:5,	/* snoop mode */
1292 			mem_lock:2,	/* lock instr */
1293 			mem_dtlb:7,	/* tlb access */
1294 			mem_lvl_num:4,	/* memory hierarchy level number */
1295 			mem_remote:1,   /* remote */
1296 			mem_snoopx:2,	/* snoop mode, ext */
1297 			mem_blk:3,	/* access blocked */
1298 			mem_hops:3,	/* hop level */
1299 			mem_rsvd:18;
1300 	};
1301 };
1302 #elif defined(__BIG_ENDIAN_BITFIELD)
1303 union perf_mem_data_src {
1304 	__u64 val;
1305 	struct {
1306 		__u64	mem_rsvd:18,
1307 			mem_hops:3,	/* hop level */
1308 			mem_blk:3,	/* access blocked */
1309 			mem_snoopx:2,	/* snoop mode, ext */
1310 			mem_remote:1,   /* remote */
1311 			mem_lvl_num:4,	/* memory hierarchy level number */
1312 			mem_dtlb:7,	/* tlb access */
1313 			mem_lock:2,	/* lock instr */
1314 			mem_snoop:5,	/* snoop mode */
1315 			mem_lvl:14,	/* memory hierarchy level */
1316 			mem_op:5;	/* type of opcode */
1317 	};
1318 };
1319 #else
1320 #error "Unknown endianness"
1321 #endif
1322 
1323 /* type of opcode (load/store/prefetch,code) */
1324 #define PERF_MEM_OP_NA		0x01 /* not available */
1325 #define PERF_MEM_OP_LOAD	0x02 /* load instruction */
1326 #define PERF_MEM_OP_STORE	0x04 /* store instruction */
1327 #define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
1328 #define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
1329 #define PERF_MEM_OP_SHIFT	0
1330 
1331 /*
1332  * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
1333  * favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
1334  * This namespace is still supported in order not to break defined ABIs.
1335  *
1336  * memory hierarchy (memory level, hit or miss)
1337  */
1338 #define PERF_MEM_LVL_NA		0x01  /* not available */
1339 #define PERF_MEM_LVL_HIT	0x02  /* hit level */
1340 #define PERF_MEM_LVL_MISS	0x04  /* miss level  */
1341 #define PERF_MEM_LVL_L1		0x08  /* L1 */
1342 #define PERF_MEM_LVL_LFB	0x10  /* Line Fill Buffer */
1343 #define PERF_MEM_LVL_L2		0x20  /* L2 */
1344 #define PERF_MEM_LVL_L3		0x40  /* L3 */
1345 #define PERF_MEM_LVL_LOC_RAM	0x80  /* Local DRAM */
1346 #define PERF_MEM_LVL_REM_RAM1	0x100 /* Remote DRAM (1 hop) */
1347 #define PERF_MEM_LVL_REM_RAM2	0x200 /* Remote DRAM (2 hops) */
1348 #define PERF_MEM_LVL_REM_CCE1	0x400 /* Remote Cache (1 hop) */
1349 #define PERF_MEM_LVL_REM_CCE2	0x800 /* Remote Cache (2 hops) */
1350 #define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
1351 #define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
1352 #define PERF_MEM_LVL_SHIFT	5
1353 
1354 #define PERF_MEM_REMOTE_REMOTE	0x01  /* Remote */
1355 #define PERF_MEM_REMOTE_SHIFT	37
1356 
1357 #define PERF_MEM_LVLNUM_L1	0x01 /* L1 */
1358 #define PERF_MEM_LVLNUM_L2	0x02 /* L2 */
1359 #define PERF_MEM_LVLNUM_L3	0x03 /* L3 */
1360 #define PERF_MEM_LVLNUM_L4	0x04 /* L4 */
1361 #define PERF_MEM_LVLNUM_L2_MHB	0x05 /* L2 Miss Handling Buffer */
1362 #define PERF_MEM_LVLNUM_MSC	0x06 /* Memory-side Cache */
1363 /* 0x7 available */
1364 #define PERF_MEM_LVLNUM_UNC	0x08 /* Uncached */
1365 #define PERF_MEM_LVLNUM_CXL	0x09 /* CXL */
1366 #define PERF_MEM_LVLNUM_IO	0x0a /* I/O */
1367 #define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
1368 #define PERF_MEM_LVLNUM_LFB	0x0c /* LFB / L1 Miss Handling Buffer */
1369 #define PERF_MEM_LVLNUM_RAM	0x0d /* RAM */
1370 #define PERF_MEM_LVLNUM_PMEM	0x0e /* PMEM */
1371 #define PERF_MEM_LVLNUM_NA	0x0f /* N/A */
1372 
1373 #define PERF_MEM_LVLNUM_SHIFT	33
1374 
1375 /* snoop mode */
1376 #define PERF_MEM_SNOOP_NA	0x01 /* not available */
1377 #define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
1378 #define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
1379 #define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
1380 #define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
1381 #define PERF_MEM_SNOOP_SHIFT	19
1382 
1383 #define PERF_MEM_SNOOPX_FWD	0x01 /* forward */
1384 #define PERF_MEM_SNOOPX_PEER	0x02 /* xfer from peer */
1385 #define PERF_MEM_SNOOPX_SHIFT  38
1386 
1387 /* locked instruction */
1388 #define PERF_MEM_LOCK_NA	0x01 /* not available */
1389 #define PERF_MEM_LOCK_LOCKED	0x02 /* locked transaction */
1390 #define PERF_MEM_LOCK_SHIFT	24
1391 
1392 /* TLB access */
1393 #define PERF_MEM_TLB_NA		0x01 /* not available */
1394 #define PERF_MEM_TLB_HIT	0x02 /* hit level */
1395 #define PERF_MEM_TLB_MISS	0x04 /* miss level */
1396 #define PERF_MEM_TLB_L1		0x08 /* L1 */
1397 #define PERF_MEM_TLB_L2		0x10 /* L2 */
1398 #define PERF_MEM_TLB_WK		0x20 /* Hardware Walker */
1399 #define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
1400 #define PERF_MEM_TLB_SHIFT	26
1401 
1402 /* Access blocked */
1403 #define PERF_MEM_BLK_NA		0x01 /* not available */
1404 #define PERF_MEM_BLK_DATA	0x02 /* data could not be forwarded */
1405 #define PERF_MEM_BLK_ADDR	0x04 /* address conflict */
1406 #define PERF_MEM_BLK_SHIFT	40
1407 
1408 /* hop level */
1409 #define PERF_MEM_HOPS_0		0x01 /* remote core, same node */
1410 #define PERF_MEM_HOPS_1		0x02 /* remote node, same socket */
1411 #define PERF_MEM_HOPS_2		0x03 /* remote socket, same board */
1412 #define PERF_MEM_HOPS_3		0x04 /* remote board */
1413 /* 5-7 available */
1414 #define PERF_MEM_HOPS_SHIFT	43
1415 
1416 #define PERF_MEM_S(a, s) \
1417 	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
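
/*
 * Example (illustrative): PERF_MEM_S() builds the shifted encodings defined
 * above, e.g. a load that missed in L3 and in the L1 DTLB, with a snoop
 * miss. "dsrc" is a hypothetical variable.
 *
 *   union perf_mem_data_src dsrc;
 *
 *   dsrc.val = PERF_MEM_S(OP, LOAD)    |
 *              PERF_MEM_S(LVL, MISS)   | PERF_MEM_S(LVL, L3) |
 *              PERF_MEM_S(SNOOP, MISS) |
 *              PERF_MEM_S(TLB, MISS)   | PERF_MEM_S(TLB, L1);
 */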
1418 
1419 /*
1420  * single taken branch record layout:
1421  *
1422  *      from: source instruction (may not always be a branch insn)
1423  *        to: branch target
1424  *   mispred: branch target was mispredicted
1425  * predicted: branch target was predicted
1426  *
1427  * Support for mispred and predicted is optional. In case it
1428  * is not supported, mispred = predicted = 0.
1429  *
1430  *     in_tx: running in a hardware transaction
1431  *     abort: aborting a hardware transaction
1432  *    cycles: cycles from last branch (or 0 if not supported)
1433  *      type: branch type
1434  *      spec: branch speculation info (or 0 if not supported)
1435  */
1436 struct perf_branch_entry {
1437 	__u64	from;
1438 	__u64	to;
1439 	__u64	mispred:1,  /* target mispredicted */
1440 		predicted:1,/* target predicted */
1441 		in_tx:1,    /* in transaction */
1442 		abort:1,    /* transaction abort */
1443 		cycles:16,  /* cycle count to last branch */
1444 		type:4,     /* branch type */
1445 		spec:2,     /* branch speculation info */
1446 		new_type:4, /* additional branch type */
1447 		priv:3,     /* privilege level */
1448 		reserved:31;
1449 };
1450 
1451 /* Size of used info bits in struct perf_branch_entry */
1452 #define PERF_BRANCH_ENTRY_INFO_BITS_MAX		33
1453 
1454 union perf_sample_weight {
1455 	__u64		full;
1456 #if defined(__LITTLE_ENDIAN_BITFIELD)
1457 	struct {
1458 		__u32	var1_dw;
1459 		__u16	var2_w;
1460 		__u16	var3_w;
1461 	};
1462 #elif defined(__BIG_ENDIAN_BITFIELD)
1463 	struct {
1464 		__u16	var3_w;
1465 		__u16	var2_w;
1466 		__u32	var1_dw;
1467 	};
1468 #else
1469 #error "Unknown endianness"
1470 #endif
1471 };
1472 
1473 #endif /* _UAPI_LINUX_PERF_EVENT_H */
1474