/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE	= 0,
	PERF_TYPE_SOFTWARE	= 1,
	PERF_TYPE_TRACEPOINT	= 2,
	PERF_TYPE_HW_CACHE	= 3,
	PERF_TYPE_RAW		= 4,
	PERF_TYPE_BREAKPOINT	= 5,

	PERF_TYPE_MAX,		/* non-ABI */
};

/*
 * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
 *
 * PERF_TYPE_HARDWARE:	0xEEEEEEEE000000AA
 *			AA: hardware event ID
 *			EEEEEEEE: PMU type ID
 *
 * PERF_TYPE_HW_CACHE:	0xEEEEEEEE00DDCCBB
 *			BB: hardware cache ID
 *			CC: hardware cache op ID
 *			DD: hardware cache op result ID
 *			EEEEEEEE: PMU type ID
 *
 * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
 */
#define PERF_PMU_TYPE_SHIFT	32
#define PERF_HW_EVENT_MASK	0xffffffff

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D		= 0,
	PERF_COUNT_HW_CACHE_L1I		= 1,
	PERF_COUNT_HW_CACHE_LL		= 2,
	PERF_COUNT_HW_CACHE_DTLB	= 3,
	PERF_COUNT_HW_CACHE_ITLB	= 4,
	PERF_COUNT_HW_CACHE_BPU		= 5,
	PERF_COUNT_HW_CACHE_NODE	= 6,

	PERF_COUNT_HW_CACHE_MAX,	/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ	= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE	= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH	= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,	/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
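/*
 * Illustrative only, not part of the ABI: per the attr.config layout
 * documented above, a PERF_TYPE_HW_CACHE config is composed from the
 * three cache enums, e.g. to count L1-D read misses:
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */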
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and SW events of the kernel (and allow the profiling of them
 * as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK		= 0,
	PERF_COUNT_SW_TASK_CLOCK	= 1,
	PERF_COUNT_SW_PAGE_FAULTS	= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES	= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS	= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN	= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ	= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS	= 7,
	PERF_COUNT_SW_EMULATION_FAULTS	= 8,
	PERF_COUNT_SW_DUMMY		= 9,
	PERF_COUNT_SW_BPF_OUTPUT	= 10,
	PERF_COUNT_SW_CGROUP_SWITCHES	= 11,

	PERF_COUNT_SW_MAX,		/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP			= 1U << 0,
	PERF_SAMPLE_TID			= 1U << 1,
	PERF_SAMPLE_TIME		= 1U << 2,
	PERF_SAMPLE_ADDR		= 1U << 3,
	PERF_SAMPLE_READ		= 1U << 4,
	PERF_SAMPLE_CALLCHAIN		= 1U << 5,
	PERF_SAMPLE_ID			= 1U << 6,
	PERF_SAMPLE_CPU			= 1U << 7,
	PERF_SAMPLE_PERIOD		= 1U << 8,
	PERF_SAMPLE_STREAM_ID		= 1U << 9,
	PERF_SAMPLE_RAW			= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK	= 1U << 11,
	PERF_SAMPLE_REGS_USER		= 1U << 12,
	PERF_SAMPLE_STACK_USER		= 1U << 13,
	PERF_SAMPLE_WEIGHT		= 1U << 14,
	PERF_SAMPLE_DATA_SRC		= 1U << 15,
	PERF_SAMPLE_IDENTIFIER		= 1U << 16,
	PERF_SAMPLE_TRANSACTION		= 1U << 17,
	PERF_SAMPLE_REGS_INTR		= 1U << 18,
	PERF_SAMPLE_PHYS_ADDR		= 1U << 19,
	PERF_SAMPLE_AUX			= 1U << 20,
	PERF_SAMPLE_CGROUP		= 1U << 21,
	PERF_SAMPLE_DATA_PAGE_SIZE	= 1U << 22,
	PERF_SAMPLE_CODE_PAGE_SIZE	= 1U << 23,
	PERF_SAMPLE_WEIGHT_STRUCT	= 1U << 24,

	PERF_SAMPLE_MAX			= 1U << 25,	/* non-ABI */
};

#define PERF_SAMPLE_WEIGHT_TYPE	(PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
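/*
 * Illustrative only: sample_type is a bitmask of the values above; a
 * typical sampling setup might request e.g.:
 *
 *	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *			   PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD;
 *
 * Each selected bit adds the corresponding field to PERF_RECORD_SAMPLE,
 * in the order shown in the PERF_RECORD_SAMPLE layout further below.
 */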
/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
	PERF_SAMPLE_BRANCH_USER_SHIFT		= 0,  /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL_SHIFT		= 1,  /* kernel branches */
	PERF_SAMPLE_BRANCH_HV_SHIFT		= 2,  /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY_SHIFT		= 3,  /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT	= 4,  /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT	= 5,  /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL_SHIFT	= 6,  /* indirect calls */
	PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT	= 7,  /* transaction aborts */
	PERF_SAMPLE_BRANCH_IN_TX_SHIFT		= 8,  /* in transaction */
	PERF_SAMPLE_BRANCH_NO_TX_SHIFT		= 9,  /* not in transaction */
	PERF_SAMPLE_BRANCH_COND_SHIFT		= 10, /* conditional branches */

	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT	= 11, /* CALL/RET stack */
	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT	= 12, /* indirect jumps */
	PERF_SAMPLE_BRANCH_CALL_SHIFT		= 13, /* direct call */

	PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT	= 14, /* no flags */
	PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT	= 15, /* no cycles */

	PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT	= 16, /* save branch type */

	PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT	= 17, /* save low level index of raw branch records */

	PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT	= 18, /* save privilege mode */

	PERF_SAMPLE_BRANCH_COUNTERS_SHIFT	= 19, /* save occurrences of events on a branch */

	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
};

enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
	PERF_SAMPLE_BRANCH_HV		= 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

	PERF_SAMPLE_BRANCH_ANY		= 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
	PERF_SAMPLE_BRANCH_IN_TX	= 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
	PERF_SAMPLE_BRANCH_NO_TX	= 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
	PERF_SAMPLE_BRANCH_COND		= 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

	PERF_SAMPLE_BRANCH_CALL_STACK	= 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
	PERF_SAMPLE_BRANCH_IND_JUMP	= 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
	PERF_SAMPLE_BRANCH_CALL		= 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

	PERF_SAMPLE_BRANCH_NO_FLAGS	= 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
	PERF_SAMPLE_BRANCH_NO_CYCLES	= 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

	PERF_SAMPLE_BRANCH_TYPE_SAVE	= 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

	PERF_SAMPLE_BRANCH_HW_INDEX	= 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,

	PERF_SAMPLE_BRANCH_PRIV_SAVE	= 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,

	PERF_SAMPLE_BRANCH_COUNTERS	= 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,

	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
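/*
 * Illustrative only: to get branch records in samples, set
 * PERF_SAMPLE_BRANCH_STACK in attr.sample_type and program the desired
 * filters into attr.branch_sample_type, e.g. any taken user-space branch:
 *
 *	attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *				   PERF_SAMPLE_BRANCH_ANY;
 */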
/*
 * Common control flow change classifications:
 */
enum {
	PERF_BR_UNKNOWN		= 0,	/* Unknown */
	PERF_BR_COND		= 1,	/* Conditional */
	PERF_BR_UNCOND		= 2,	/* Unconditional */
	PERF_BR_IND		= 3,	/* Indirect */
	PERF_BR_CALL		= 4,	/* Function call */
	PERF_BR_IND_CALL	= 5,	/* Indirect function call */
	PERF_BR_RET		= 6,	/* Function return */
	PERF_BR_SYSCALL		= 7,	/* Syscall */
	PERF_BR_SYSRET		= 8,	/* Syscall return */
	PERF_BR_COND_CALL	= 9,	/* Conditional function call */
	PERF_BR_COND_RET	= 10,	/* Conditional function return */
	PERF_BR_ERET		= 11,	/* Exception return */
	PERF_BR_IRQ		= 12,	/* IRQ */
	PERF_BR_SERROR		= 13,	/* System error */
	PERF_BR_NO_TX		= 14,	/* Not in transaction */
	PERF_BR_EXTEND_ABI	= 15,	/* Extend ABI */
	PERF_BR_MAX,
};

/*
 * Common branch speculation outcome classifications:
 */
enum {
	PERF_BR_SPEC_NA			= 0,	/* Not available */
	PERF_BR_SPEC_WRONG_PATH		= 1,	/* Speculative but on wrong path */
	PERF_BR_NON_SPEC_CORRECT_PATH	= 2,	/* Non-speculative but on correct path */
	PERF_BR_SPEC_CORRECT_PATH	= 3,	/* Speculative and on correct path */
	PERF_BR_SPEC_MAX,
};

enum {
	PERF_BR_NEW_FAULT_ALGN	= 0,	/* Alignment fault */
	PERF_BR_NEW_FAULT_DATA	= 1,	/* Data fault */
	PERF_BR_NEW_FAULT_INST	= 2,	/* Inst fault */
	PERF_BR_NEW_ARCH_1	= 3,	/* Architecture specific */
	PERF_BR_NEW_ARCH_2	= 4,	/* Architecture specific */
	PERF_BR_NEW_ARCH_3	= 5,	/* Architecture specific */
	PERF_BR_NEW_ARCH_4	= 6,	/* Architecture specific */
	PERF_BR_NEW_ARCH_5	= 7,	/* Architecture specific */
	PERF_BR_NEW_MAX,
};

enum {
	PERF_BR_PRIV_UNKNOWN	= 0,
	PERF_BR_PRIV_USER	= 1,
	PERF_BR_PRIV_KERNEL	= 2,
	PERF_BR_PRIV_HV		= 3,
};

#define PERF_BR_ARM64_FIQ		PERF_BR_NEW_ARCH_1
#define PERF_BR_ARM64_DEBUG_HALT	PERF_BR_NEW_ARCH_2
#define PERF_BR_ARM64_DEBUG_EXIT	PERF_BR_NEW_ARCH_3
#define PERF_BR_ARM64_DEBUG_INST	PERF_BR_NEW_ARCH_4
#define PERF_BR_ARM64_DEBUG_DATA	PERF_BR_NEW_ARCH_5

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
	PERF_TXN_ELISION	= (1 << 0), /* From elision */
	PERF_TXN_TRANSACTION	= (1 << 1), /* From transaction */
	PERF_TXN_SYNC		= (1 << 2), /* Instruction is related */
	PERF_TXN_ASYNC		= (1 << 3), /* Instruction is not related */
	PERF_TXN_RETRY		= (1 << 4), /* Retry possible */
	PERF_TXN_CONFLICT	= (1 << 5), /* Conflict abort */
	PERF_TXN_CAPACITY_WRITE	= (1 << 6), /* Capacity write abort */
	PERF_TXN_CAPACITY_READ	= (1 << 7), /* Capacity read abort */

	PERF_TXN_MAX		= (1 << 8), /* non-ABI */

	/* Bits 32..63 are reserved for the abort code */

	PERF_TXN_ABORT_MASK	= (0xffffffffULL << 32),
	PERF_TXN_ABORT_SHIFT	= 32,
};
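/*
 * Illustrative only: given a PERF_SAMPLE_TRANSACTION value 'txn', the
 * low bits are the qualifiers above, and the abort code documented to
 * live in bits 32..63 can be extracted as:
 *
 *	abort_code = (txn & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT;
 */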
/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	  { u64		lost;         } && PERF_FORMAT_LOST
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	    { u64	lost;         } && PERF_FORMAT_LOST
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
	PERF_FORMAT_ID			= 1U << 2,
	PERF_FORMAT_GROUP		= 1U << 3,
	PERF_FORMAT_LOST		= 1U << 4,

	PERF_FORMAT_MAX			= 1U << 5,	/* non-ABI */
};
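/*
 * Illustrative only, a minimal sketch of parsing the above for a single
 * event (no PERF_FORMAT_GROUP) opened with both time fields, scaling
 * the count to compensate for PMU multiplexing:
 *
 *	struct { __u64 value, time_enabled, time_running; } rf;
 *
 *	read(fd, &rf, sizeof(rf));
 *	if (rf.time_running)
 *		count = rf.value * rf.time_enabled / rf.time_running;
 *
 * The scaling step mirrors the calculation documented for the mmap()
 * control page below.
 */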
#define PERF_ATTR_SIZE_VER0	 64	/* Size of first published 'struct perf_event_attr' */
#define PERF_ATTR_SIZE_VER1	 72	/* Add: config2 */
#define PERF_ATTR_SIZE_VER2	 80	/* Add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	 96	/* Add: sample_regs_user */
					/* Add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4	104	/* Add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5	112	/* Add: aux_watermark */
#define PERF_ATTR_SIZE_VER6	120	/* Add: aux_sample_size */
#define PERF_ATTR_SIZE_VER7	128	/* Add: sig_data */
#define PERF_ATTR_SIZE_VER8	136	/* Add: config3 */
#define PERF_ATTR_SIZE_VER9	144	/* Add: config4 */

/*
 * 'struct perf_event_attr' contains various attributes that define
 * a performance event - most of them hardware related configuration
 * details, but also a lot of behavioral switches and values implemented
 * by the kernel.
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32	type;

	/*
	 * Size of the attr structure, for forward/backwards compatibility.
	 */
	__u32	size;

	/*
	 * Type specific configuration information.
	 */
	__u64	config;

	union {
		__u64	sample_period;
		__u64	sample_freq;
	};

	__u64	sample_type;
	__u64	read_format;

	__u64	disabled	: 1, /* off by default */
		inherit		: 1, /* children inherit it */
		pinned		: 1, /* must always be on PMU */
		exclusive	: 1, /* only group on PMU */
		exclude_user	: 1, /* don't count user */
		exclude_kernel	: 1, /* ditto kernel */
		exclude_hv	: 1, /* ditto hypervisor */
		exclude_idle	: 1, /* don't count when idle */
		mmap		: 1, /* include mmap data */
		comm		: 1, /* include comm data */
		freq		: 1, /* use freq, not period */
		inherit_stat	: 1, /* per task counts */
		enable_on_exec	: 1, /* next exec enables */
		task		: 1, /* trace fork/exit */
		watermark	: 1, /* wakeup_watermark */
		/*
		 * precise_ip:
		 *
		 *  0 - SAMPLE_IP can have arbitrary skid
		 *  1 - SAMPLE_IP must have constant skid
		 *  2 - SAMPLE_IP requested to have 0 skid
		 *  3 - SAMPLE_IP must have 0 skid
		 *
		 * See also PERF_RECORD_MISC_EXACT_IP
		 */
		precise_ip	: 2, /* skid constraint */
		mmap_data	: 1, /* non-exec mmap data */
		sample_id_all	: 1, /* sample_type all events */

		exclude_host	: 1, /* don't count in host */
		exclude_guest	: 1, /* don't count in guest */

		exclude_callchain_kernel : 1, /* exclude kernel callchains */
		exclude_callchain_user   : 1, /* exclude user callchains */
		mmap2		: 1, /* include mmap with inode data */
		comm_exec	: 1, /* flag comm events that are due to an exec */
		use_clockid	: 1, /* use @clockid for time fields */
		context_switch	: 1, /* context switch data */
		write_backward	: 1, /* write ring buffer from end to beginning */
		namespaces	: 1, /* include namespaces data */
		ksymbol		: 1, /* include ksymbol events */
		bpf_event	: 1, /* include BPF events */
		aux_output	: 1, /* generate AUX records instead of events */
		cgroup		: 1, /* include cgroup events */
		text_poke	: 1, /* include text poke events */
		build_id	: 1, /* use build ID in mmap2 events */
		inherit_thread	: 1, /* children only inherit if cloned with CLONE_THREAD */
		remove_on_exec	: 1, /* event is removed from task on exec */
		sigtrap		: 1, /* send synchronous SIGTRAP on event */
		defer_callchain	: 1, /* request PERF_RECORD_CALLCHAIN_DEFERRED records */
		defer_output	: 1, /* output PERF_RECORD_CALLCHAIN_DEFERRED records */
		__reserved_1	: 24;

	union {
		__u32	wakeup_events;	  /* wake up every n events */
		__u32	wakeup_watermark; /* bytes before wakeup */
	};

	__u32	bp_type;
	union {
		__u64	bp_addr;
		__u64	kprobe_func;	/* for perf_kprobe */
		__u64	uprobe_path;	/* for perf_uprobe */
		__u64	config1;	/* extension of config */
	};
	union {
		__u64	bp_len;
		__u64	kprobe_addr;	/* when kprobe_func == NULL */
		__u64	probe_offset;	/* for perf_[k,u]probe */
		__u64	config2;	/* extension of config1 */
	};
	__u64	branch_sample_type;	/* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32	sample_stack_user;

	__s32	clockid;
	/*
	 * Defines set of regs to dump for each sample
	 * state captured on:
	 *  - precise = 0: PMU interrupt
	 *  - precise > 0: sampled instruction
	 *
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_intr;

	/*
	 * Wakeup watermark for AUX area
	 */
	__u32	aux_watermark;

	/*
	 * Max number of frame pointers in a callchain, should be
	 * lower than /proc/sys/kernel/perf_event_max_stack.
	 *
	 * Max number of entries of branch stack should be lower
	 * than the hardware limit.
	 */
	__u16	sample_max_stack;

	__u16	__reserved_2;
	__u32	aux_sample_size;

	union {
		__u32	aux_action;
		struct {
			__u32	aux_start_paused : 1, /* start AUX area tracing paused */
				aux_pause	 : 1, /* on overflow, pause AUX area tracing */
				aux_resume	 : 1, /* on overflow, resume AUX area tracing */
				__reserved_3	 : 29;
		};
	};

	/*
	 * User provided data if sigtrap=1, passed back to user via
	 * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
	 * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
	 * truncated accordingly on 32-bit architectures.
	 */
	__u64	sig_data;

	__u64	config3; /* extension of config2 */
	__u64	config4; /* extension of config3 */
};
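/*
 * Illustrative only, a minimal sketch of opening an event with this
 * structure; glibc provides no wrapper, so the raw syscall is used:
 *
 *	struct perf_event_attr attr = { };
 *
 *	attr.type           = PERF_TYPE_HARDWARE;
 *	attr.size           = sizeof(attr);
 *	attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled       = 1;
 *	attr.exclude_kernel = 1;
 *
 *	# pid == 0 (self), cpu == -1 (any), group_fd == -1, flags == 0:
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */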
/*
 * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
 * to query BPF programs attached to the same perf tracepoint
 * as the given perf event.
 */
struct perf_event_query_bpf {
	/*
	 * The below ids array length
	 */
	__u32	ids_len;
	/*
	 * Set by the kernel to indicate the number of
	 * available programs
	 */
	__u32	prog_cnt;
	/*
	 * User provided buffer to store program ids
	 */
	__u32	ids[];
};

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE			_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE			_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH			_IO ('$', 2)
#define PERF_EVENT_IOC_RESET			_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD			_IOW ('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT		_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER		_IOW ('$', 6, char *)
#define PERF_EVENT_IOC_ID			_IOR ('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF			_IOW ('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT		_IOW ('$', 9, __u32)
#define PERF_EVENT_IOC_QUERY_BPF		_IOWR('$', 10, struct perf_event_query_bpf *)
#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES	_IOW ('$', 11, struct perf_event_attr *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP	= 1U << 0,
};
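/*
 * Illustrative only: a common pattern is to open the event with
 * attr.disabled = 1 and use the ioctls above around the measured region:
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,  0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	# ... workload ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *
 * Passing PERF_IOC_FLAG_GROUP as the argument applies the operation to
 * all events in the event group instead of just this one.
 */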
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;	   /* version number of this structure */
	__u32	compat_version;	   /* lowest version this is compat with */

	/*
	 * Bits needed to read the HW events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, index, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier()
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_usr_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     index = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_user_rdpmc && index) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(index - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;		   /* seqlock for synchronization */
	__u32	index;		   /* hardware event identifier */
	__s64	offset;		   /* add to hardware event value */
	__u64	time_enabled;	   /* time event active */
	__u64	time_running;	   /* time event on CPU */
	union {
		__u64	capabilities;
		struct {
			__u64	cap_bit0		: 1, /* Always 0, deprecated, see commit 860f085b74e9 */
				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */

				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
				cap_user_time		: 1, /* The time_{shift,mult,offset} fields are used */
				cap_user_time_zero	: 1, /* The time_zero field is used */
				cap_user_time_short	: 1, /* the time_{cycle,mask} fields are used */
				cap_____res		: 58;
		};
	};

	/*
	 * If cap_user_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;

	/*
	 * If cap_usr_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using RDTSC or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot  = (cyc >> time_shift);
	 *   rem   = cyc & (((u64)1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *           ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if index), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (index)
	 *     running += delta;
	 *
	 *   quot  = count / running;
	 *   rem   = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;
	/*
	 * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
	 * from sample timestamps.
	 *
	 *   time = timestamp - time_zero;
	 *   quot = time / time_mult;
	 *   rem  = time % time_mult;
	 *   cyc  = (quot << time_shift) + (rem << time_shift) / time_mult;
	 *
	 * And vice versa:
	 *
	 *   quot      = cyc >> time_shift;
	 *   rem       = cyc & (((u64)1 << time_shift) - 1);
	 *   timestamp = time_zero + quot * time_mult +
	 *               ((rem * time_mult) >> time_shift);
	 */
	__u64	time_zero;

	__u32	size;		   /* Header size up to __reserved[] fields. */
	__u32	__reserved_1;

	/*
	 * If cap_usr_time_short, the hardware clock is less than 64bit wide
	 * and we must compute the 'cyc' value, as used by cap_usr_time, as:
	 *
	 *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
	 *
	 * NOTE: this form is explicitly chosen such that cap_usr_time_short
	 *       is a correction on top of cap_usr_time, and code that doesn't
	 *       know about cap_usr_time_short still works under the assumption
	 *       the counter doesn't wrap.
	 */
	__u64	time_cycles;
	__u64	time_mask;

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u8	__reserved[116*8]; /* align to 1k. */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an smp_rmb(),
	 * after reading this value.
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by user-space to reflect the last read data, after issuing
	 * an smp_mb() to separate the data read from the ->data_tail store.
	 * In this case the kernel will not over-write unread data.
	 *
	 * See perf_output_put_handle() for the data ordering.
	 *
	 * data_{offset,size} indicate the location and size of the perf record
	 * buffer within the mmapped area.
	 */
	__u64	data_head;	/* head in the data section */
	__u64	data_tail;	/* user-space written tail */
	__u64	data_offset;	/* where the buffer starts */
	__u64	data_size;	/* data buffer size */

	/*
	 * AUX area is defined by aux_{offset,size} fields that should be set
	 * by the user-space, so that
	 *
	 *   aux_offset >= data_offset + data_size
	 *
	 * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
	 *
	 * Ring buffer pointers aux_{head,tail} have the same semantics as
	 * data_{head,tail} and same ordering rules apply.
	 */
	__u64	aux_head;
	__u64	aux_tail;
	__u64	aux_offset;
	__u64	aux_size;
};
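/*
 * Illustrative only, a minimal sketch of consuming records according to
 * the data_{head,tail} rules above. 'base' is the start of the mmap()ed
 * area, 'pc' points to this structure, and a power-of-2 data_size is
 * assumed:
 *
 *	__u64 tail = pc->data_tail;
 *	__u64 head = pc->data_head;
 *
 *	smp_rmb();	# or an equivalent user-space acquire barrier
 *
 *	while (tail < head) {
 *		struct perf_event_header *hdr = (void *)(base +
 *			pc->data_offset + (tail & (pc->data_size - 1)));
 *
 *		# ... process the record, minding wrap-around ...
 *		tail += hdr->size;
 *	}
 *
 *	smp_mb();	# order record reads before the tail store
 *	pc->data_tail = tail;
 */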
/*
 * The current state of perf_event_header::misc bits usage:
 * ('|' used bit, '-' unused bit)
 *
 *  012         CDEF
 *  |||---------||||
 *
 *  Where:
 *    0-2  CPUMODE_MASK
 *
 *    C    PROC_MAP_PARSE_TIMEOUT
 *    D    MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
 *    E    MMAP_BUILD_ID / EXACT_IP / SWITCH_OUT_PREEMPT
 *    F    (reserved)
 */

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
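/*
 * Illustrative only: the CPUMODE bits form a small enumeration rather
 * than individual flags, so they are decoded by masking first:
 *
 *	switch (hdr->misc & PERF_RECORD_MISC_CPUMODE_MASK) {
 *	case PERF_RECORD_MISC_KERNEL: ...
 *	case PERF_RECORD_MISC_USER:   ...
 *	}
 *
 * The bits defined below are instead interpreted according to the
 * record type, as several of them share a bit position.
 */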
/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
/*
 * Following PERF_RECORD_MISC_* are used on different
 * events, so can reuse the same bit position:
 *
 *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
 *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
 *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
 *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
 */
#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
#define PERF_RECORD_MISC_FORK_EXEC		(1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
/*
 * These PERF_RECORD_MISC_* flags below are safely reused
 * for the following events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
 *
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that thread was preempted in TASK_RUNNING state.
 *
 * PERF_RECORD_MISC_MMAP_BUILD_ID:
 *   Indicates that mmap2 event carries build ID data.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
#define PERF_RECORD_MISC_MMAP_BUILD_ID		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

struct perf_ns_link_info {
	__u64	dev;
	__u64	ino;
};

enum {
	NET_NS_INDEX	= 0,
	UTS_NS_INDEX	= 1,
	IPC_NS_INDEX	= 2,
	PID_NS_INDEX	= 3,
	USER_NS_INDEX	= 4,
	MNT_NS_INDEX	= 5,
	CGROUP_NS_INDEX	= 6,

	NR_NAMESPACES,	/* number of available namespaces */
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER) described in PERF_RECORD_SAMPLE below. They are
	 * stashed just after the perf_event_header and the fields already
	 * present for the existing record type, i.e. at the end of the
	 * payload. That way a newer perf.data file will be supported by
	 * older perf tools, with these new optional fields being ignored.
	 *
	 * struct sample_id {
	 *	{ u32 pid, tid;  } && PERF_SAMPLE_TID
	 *	{ u64 time;      } && PERF_SAMPLE_TIME
	 *	{ u64 id;        } && PERF_SAMPLE_ID
	 *	{ u64 stream_id; } && PERF_SAMPLE_STREAM_ID
	 *	{ u32 cpu, res;  } && PERF_SAMPLE_CPU
	 *	{ u64 id;        } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate user-space IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u32	pid, tid;
	 *	u64	addr;
	 *	u64	len;
	 *	u64	pgoff;
	 *	char	filename[];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_MMAP	= 1,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u64	id;
	 *	u64	lost;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_LOST	= 2,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u32	pid, tid;
	 *	char	comm[];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_COMM	= 3,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32	pid, ppid;
	 *	u32	tid, ptid;
	 *	u64	time;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_EXIT	= 4,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u64	time;
	 *	u64	id;
	 *	u64	stream_id;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE	= 5,
	PERF_RECORD_UNTHROTTLE	= 6,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32	pid, ppid;
	 *	u32	tid, ptid;
	 *	u64	time;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_FORK	= 7,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32	pid, tid;
	 *
	 *	struct read_format values;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_READ	= 8,
	/*
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	#
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *	#
	 *
	 *	{ u64 id;        } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64 ip;        } && PERF_SAMPLE_IP
	 *	{ u32 pid, tid;  } && PERF_SAMPLE_TID
	 *	{ u64 time;      } && PERF_SAMPLE_TIME
	 *	{ u64 addr;      } && PERF_SAMPLE_ADDR
	 *	{ u64 id;        } && PERF_SAMPLE_ID
	 *	{ u64 stream_id; } && PERF_SAMPLE_STREAM_ID
	 *	{ u32 cpu, res;  } && PERF_SAMPLE_CPU
	 *	{ u64 period;    } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format values; } && PERF_SAMPLE_READ
	 *
	 *	{ u64 nr;
	 *	  u64 ips[nr];   } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32 size;
	 *	  char data[size]; } && PERF_SAMPLE_RAW
	 *
	 *	{ u64 nr;
	 *	  { u64 hw_idx;  } && PERF_SAMPLE_BRANCH_HW_INDEX
	 *	  { u64 from, to, flags } lbr[nr];
	 *	  #
	 *	  # The format of the counters is decided by the
	 *	  # "branch_counter_nr" and "branch_counter_width",
	 *	  # which are defined in the ABI.
	 *	  #
	 *	  { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
	 *	} && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64 abi;  # enum perf_sample_regs_abi
	 *	  u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64 size;
	 *	  char data[size];
	 *	  u64 dyn_size;  } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ union perf_sample_weight
	 *	  {
	 *		u64 full;  && PERF_SAMPLE_WEIGHT
	 *	#if defined(__LITTLE_ENDIAN_BITFIELD)
	 *		struct {
	 *			u32 var1_dw;
	 *			u16 var2_w;
	 *			u16 var3_w;
	 *		}  && PERF_SAMPLE_WEIGHT_STRUCT
	 *	#elif defined(__BIG_ENDIAN_BITFIELD)
	 *		struct {
	 *			u16 var3_w;
	 *			u16 var2_w;
	 *			u32 var1_dw;
	 *		}  && PERF_SAMPLE_WEIGHT_STRUCT
	 *	#endif
	 *	  }
	 *	}
	 *	{ u64 data_src;       } && PERF_SAMPLE_DATA_SRC
	 *	{ u64 transaction;    } && PERF_SAMPLE_TRANSACTION
	 *	{ u64 abi;  # enum perf_sample_regs_abi
	 *	  u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
	 *	{ u64 phys_addr;      } && PERF_SAMPLE_PHYS_ADDR
	 *	{ u64 cgroup;         } && PERF_SAMPLE_CGROUP
	 *	{ u64 data_page_size; } && PERF_SAMPLE_DATA_PAGE_SIZE
	 *	{ u64 code_page_size; } && PERF_SAMPLE_CODE_PAGE_SIZE
	 *	{ u64 size;
	 *	  char data[size];    } && PERF_SAMPLE_AUX
	 * };
	 */
	PERF_RECORD_SAMPLE	= 9,

	/*
	 * The MMAP2 records are an augmented version of MMAP; they add
	 * maj, min, ino numbers to be used to uniquely identify each mapping.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u32	pid, tid;
	 *	u64	addr;
	 *	u64	len;
	 *	u64	pgoff;
	 *	union {
	 *		struct {
	 *			u32	maj;
	 *			u32	min;
	 *			u64	ino;
	 *			u64	ino_generation;
	 *		};
	 *		struct {
	 *			u8	build_id_size;
	 *			u8	__reserved_1;
	 *			u16	__reserved_2;
	 *			u8	build_id[20];
	 *		};
	 *	};
	 *	u32	prot, flags;
	 *	char	filename[];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2	= 10,
	/*
	 * Records that new data landed in the AUX buffer part.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u64	aux_offset;
	 *	u64	aux_size;
	 *	u64	flags;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_AUX			= 11,

	/*
	 * Indicates that instruction trace has started
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	u32	pid;
	 *	u32	tid;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_ITRACE_START	= 12,

	/*
	 * Records the dropped/lost sample number.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u64	lost;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_LOST_SAMPLES	= 13,

	/*
	 * Records a context switch in or out (flagged by
	 * PERF_RECORD_MISC_SWITCH_OUT). See also
	 * PERF_RECORD_SWITCH_CPU_WIDE.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH		= 14,

	/*
	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
	 * next_prev_tid that are the next (switching out) or previous
	 * (switching in) pid/tid.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	u32	next_prev_pid;
	 *	u32	next_prev_tid;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH_CPU_WIDE	= 15,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32	pid;
	 *	u32	tid;
	 *	u64	nr_namespaces;
	 *	{ u64	dev, inode; } [nr_namespaces];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_NAMESPACES		= 16,

	/*
	 * Record ksymbol register/unregister events:
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	u64	addr;
	 *	u32	len;
	 *	u16	ksym_type;
	 *	u16	flags;
	 *	char	name[];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_KSYMBOL		= 17,

	/*
	 * Record BPF events:
	 *  enum perf_bpf_event_type {
	 *	PERF_BPF_EVENT_UNKNOWN		= 0,
	 *	PERF_BPF_EVENT_PROG_LOAD	= 1,
	 *	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
	 *  };
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	u16	type;
	 *	u16	flags;
	 *	u32	id;
	 *	u8	tag[BPF_TAG_SIZE];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_BPF_EVENT		= 18,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u64	id;
	 *	char	path[];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_CGROUP		= 19,

	/*
	 * Records changes to kernel text, i.e. self-modifying code.
	 * 'old_len' is the number of old bytes, 'new_len' is the number of
	 * new bytes. Either 'old_len' or 'new_len' may be zero to indicate,
	 * for example, the addition or removal of a trampoline. 'bytes'
	 * contains the old bytes followed immediately by the new bytes.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	u64	addr;
	 *	u16	old_len;
	 *	u16	new_len;
	 *	u8	bytes[];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_TEXT_POKE		= 20,
	/*
	 * Data written to the AUX area by hardware due to aux_output may need
	 * to be matched to the event by an architecture-specific hardware ID.
	 * This records the hardware ID, but requires sample_id to provide the
	 * event ID. E.g. Intel PT uses this record to disambiguate PEBS-via-PT
	 * records from multiple events.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	u64	hw_id;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_AUX_OUTPUT_HW_ID	= 21,

	/*
	 * This user callchain capture was deferred until shortly before
	 * returning to user space. Previous samples would have kernel
	 * callchains only and they need to be stitched with this to make full
	 * callchains.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	u64	cookie;
	 *	u64	nr;
	 *	u64	ips[nr];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_CALLCHAIN_DEFERRED	= 22,

	PERF_RECORD_MAX,		/* non-ABI */
};

enum perf_record_ksymbol_type {
	PERF_RECORD_KSYMBOL_TYPE_UNKNOWN	= 0,
	PERF_RECORD_KSYMBOL_TYPE_BPF		= 1,
	/*
	 * Out of line code such as kprobe-replaced instructions or optimized
	 * kprobes or ftrace trampolines.
	 */
	PERF_RECORD_KSYMBOL_TYPE_OOL		= 2,
	PERF_RECORD_KSYMBOL_TYPE_MAX		/* non-ABI */
};

#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER	(1 << 0)

enum perf_bpf_event_type {
	PERF_BPF_EVENT_UNKNOWN		= 0,
	PERF_BPF_EVENT_PROG_LOAD	= 1,
	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
	PERF_BPF_EVENT_MAX,		/* non-ABI */
};

#define PERF_MAX_STACK_DEPTH		127
#define PERF_MAX_CONTEXTS_PER_STACK	8

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,
	PERF_CONTEXT_USER_DEFERRED	= (__u64)-640,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
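/*
 * Illustrative only: in a PERF_SAMPLE_CALLCHAIN array the context values
 * above appear interleaved with real addresses as markers; since they
 * are very large u64 values, they can be told apart by comparing
 * against PERF_CONTEXT_MAX:
 *
 *	for (i = 0; i < nr; i++) {
 *		if (ips[i] >= (__u64)PERF_CONTEXT_MAX)
 *			context = ips[i];   # kernel/user/guest switch
 *		else
 *			# ips[i] is an instruction pointer in 'context'
 *	}
 */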
/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED			0x0001 /* Record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE			0x0002 /* Snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL			0x0004 /* Record contains gaps */
#define PERF_AUX_FLAG_COLLISION			0x0008 /* Sample collided with another */
#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK	0xff00 /* PMU specific trace format type */

/* CoreSight PMU AUX buffer formats */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT	0x0000 /* Default for backward compatibility */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW		0x0100 /* Raw format of the source */

#define PERF_FLAG_FD_NO_GROUP	(1UL << 0)
#define PERF_FLAG_FD_OUTPUT	(1UL << 1)
#define PERF_FLAG_PID_CGROUP	(1UL << 2) /* pid=cgroup ID, per-CPU mode only */
#define PERF_FLAG_FD_CLOEXEC	(1UL << 3) /* O_CLOEXEC */

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_op      :  5, /* Type of opcode */
			mem_lvl     : 14, /* Memory hierarchy level */
			mem_snoop   :  5, /* Snoop mode */
			mem_lock    :  2, /* Lock instr */
			mem_dtlb    :  7, /* TLB access */
			mem_lvl_num :  4, /* Memory hierarchy level number */
			mem_remote  :  1, /* Remote */
			mem_snoopx  :  2, /* Snoop mode, ext */
			mem_blk     :  3, /* Access blocked */
			mem_hops    :  3, /* Hop level */
			mem_rsvd    : 18;
	};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_rsvd    : 18,
			mem_hops    :  3, /* Hop level */
			mem_blk     :  3, /* Access blocked */
			mem_snoopx  :  2, /* Snoop mode, ext */
			mem_remote  :  1, /* Remote */
			mem_lvl_num :  4, /* Memory hierarchy level number */
			mem_dtlb    :  7, /* TLB access */
			mem_lock    :  2, /* Lock instr */
			mem_snoop   :  5, /* Snoop mode */
			mem_lvl     : 14, /* Memory hierarchy level */
			mem_op      :  5; /* Type of opcode */
	};
};
#else
# error "Unknown endianness"
#endif

/* Type of memory opcode: */
#define PERF_MEM_OP_NA		0x0001 /* Not available */
#define PERF_MEM_OP_LOAD	0x0002 /* Load instruction */
#define PERF_MEM_OP_STORE	0x0004 /* Store instruction */
#define PERF_MEM_OP_PFETCH	0x0008 /* Prefetch */
#define PERF_MEM_OP_EXEC	0x0010 /* Code (execution) */
#define PERF_MEM_OP_SHIFT	0

/*
 * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
 * favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
 * We support this namespace in order to not break defined ABIs.
 *
 * Memory hierarchy (memory level, hit or miss)
 */
#define PERF_MEM_LVL_NA		0x0001 /* Not available */
#define PERF_MEM_LVL_HIT	0x0002 /* Hit level */
#define PERF_MEM_LVL_MISS	0x0004 /* Miss level */
#define PERF_MEM_LVL_L1		0x0008 /* L1 */
#define PERF_MEM_LVL_LFB	0x0010 /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x0020 /* L2 */
#define PERF_MEM_LVL_L3		0x0040 /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x0080 /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x0100 /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x0200 /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x0400 /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x0800 /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

#define PERF_MEM_REMOTE_REMOTE	0x0001 /* Remote */
#define PERF_MEM_REMOTE_SHIFT	37

#define PERF_MEM_LVLNUM_L1	0x0001 /* L1 */
#define PERF_MEM_LVLNUM_L2	0x0002 /* L2 */
#define PERF_MEM_LVLNUM_L3	0x0003 /* L3 */
#define PERF_MEM_LVLNUM_L4	0x0004 /* L4 */
#define PERF_MEM_LVLNUM_L2_MHB	0x0005 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC	0x0006 /* Memory-side Cache */
/* 0x007 available */
#define PERF_MEM_LVLNUM_UNC	0x0008 /* Uncached */
#define PERF_MEM_LVLNUM_CXL	0x0009 /* CXL */
#define PERF_MEM_LVLNUM_IO	0x000a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x000b /* Any cache */
#define PERF_MEM_LVLNUM_LFB	0x000c /* LFB / L1 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_RAM	0x000d /* RAM */
#define PERF_MEM_LVLNUM_PMEM	0x000e /* PMEM */
#define PERF_MEM_LVLNUM_NA	0x000f /* N/A */

#define PERF_MEM_LVLNUM_SHIFT	33

/* Snoop mode */
#define PERF_MEM_SNOOP_NA	0x0001 /* Not available */
#define PERF_MEM_SNOOP_NONE	0x0002 /* No snoop */
#define PERF_MEM_SNOOP_HIT	0x0004 /* Snoop hit */
#define PERF_MEM_SNOOP_MISS	0x0008 /* Snoop miss */
#define PERF_MEM_SNOOP_HITM	0x0010 /* Snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

#define PERF_MEM_SNOOPX_FWD	0x0001 /* Forward */
#define PERF_MEM_SNOOPX_PEER	0x0002 /* Transfer from peer */
#define PERF_MEM_SNOOPX_SHIFT	38

/* Locked instruction */
#define PERF_MEM_LOCK_NA	0x0001 /* Not available */
#define PERF_MEM_LOCK_LOCKED	0x0002 /* Locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x0001 /* Not available */
#define PERF_MEM_TLB_HIT	0x0002 /* Hit level */
#define PERF_MEM_TLB_MISS	0x0004 /* Miss level */
#define PERF_MEM_TLB_L1		0x0008 /* L1 */
#define PERF_MEM_TLB_L2		0x0010 /* L2 */
#define PERF_MEM_TLB_WK		0x0020 /* Hardware Walker */
#define PERF_MEM_TLB_OS		0x0040 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26

/* Access blocked */
#define PERF_MEM_BLK_NA		0x0001 /* Not available */
#define PERF_MEM_BLK_DATA	0x0002 /* Data could not be forwarded */
#define PERF_MEM_BLK_ADDR	0x0004 /* Address conflict */
#define PERF_MEM_BLK_SHIFT	40

/* Hop level */
#define PERF_MEM_HOPS_0		0x0001 /* Remote core, same node */
#define PERF_MEM_HOPS_1		0x0002 /* Remote node, same socket */
#define PERF_MEM_HOPS_2		0x0003 /* Remote socket, same board */
#define PERF_MEM_HOPS_3		0x0004 /* Remote board */
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT	43

#define PERF_MEM_S(a, s) \
	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
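/*
 * Illustrative only: the PERF_MEM_S() helper above composes
 * perf_mem_data_src values from the field/value names, e.g. a load
 * that hit a remote L3 cache:
 *
 *	data_src = PERF_MEM_S(OP, LOAD) |
 *		   PERF_MEM_S(LVLNUM, L3) |
 *		   PERF_MEM_S(REMOTE, REMOTE) |
 *		   PERF_MEM_S(SNOOP, HIT);
 *
 * Decoding works the other way around: shift right by the *_SHIFT value
 * and mask to the field width declared in the union above.
 */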
/*
 * Layout of single taken branch records:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 *      spec: branch speculation info (or 0 if not supported)
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred	  :  1, /* target mispredicted */
		predicted :  1, /* target predicted */
		in_tx	  :  1, /* in transaction */
		abort	  :  1, /* transaction abort */
		cycles	  : 16, /* cycle count to last branch */
		type	  :  4, /* branch type */
		spec	  :  2, /* branch speculation info */
		new_type  :  4, /* additional branch type */
		priv	  :  3, /* privilege level */
		reserved  : 31;
};

/* Size of used info bits in struct perf_branch_entry */
#define PERF_BRANCH_ENTRY_INFO_BITS_MAX		33

union perf_sample_weight {
	__u64	full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	struct {
		__u32	var1_dw;
		__u16	var2_w;
		__u16	var3_w;
	};
#elif defined(__BIG_ENDIAN_BITFIELD)
	struct {
		__u16	var3_w;
		__u16	var2_w;
		__u32	var1_dw;
	};
#else
# error "Unknown endianness"
#endif
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */