/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE		= 0,
	PERF_TYPE_SOFTWARE		= 1,
	PERF_TYPE_TRACEPOINT		= 2,
	PERF_TYPE_HW_CACHE		= 3,
	PERF_TYPE_RAW			= 4,
	PERF_TYPE_BREAKPOINT		= 5,

	PERF_TYPE_MAX,			/* non-ABI */
};

/*
 * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
 *
 * PERF_TYPE_HARDWARE:		0xEEEEEEEE000000AA
 *				AA: hardware event ID
 *				EEEEEEEE: PMU type ID
 *
 * PERF_TYPE_HW_CACHE:		0xEEEEEEEE00DDCCBB
 *				BB: hardware cache ID
 *				CC: hardware cache op ID
 *				DD: hardware cache op result ID
 *				EEEEEEEE: PMU type ID
 *
 * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
 */
#define PERF_PMU_TYPE_SHIFT		32
#define PERF_HW_EVENT_MASK		0xffffffff

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
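/*
 * Example (an illustrative sketch, not part of the ABI): an L1-D read-miss
 * event combines the three cache enums above according to the
 * PERF_TYPE_HW_CACHE layout of attr.config documented earlier:
 *
 *   struct perf_event_attr attr = { 0 };
 *
 *   attr.type   = PERF_TYPE_HW_CACHE;
 *   attr.size   = sizeof(attr);
 *   attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */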
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events count various software
 * events of the kernel (and allow profiling them as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
	PERF_COUNT_SW_DUMMY			= 9,
	PERF_COUNT_SW_BPF_OUTPUT		= 10,
	PERF_COUNT_SW_CGROUP_SWITCHES		= 11,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
	PERF_SAMPLE_REGS_USER			= 1U << 12,
	PERF_SAMPLE_STACK_USER			= 1U << 13,
	PERF_SAMPLE_WEIGHT			= 1U << 14,
	PERF_SAMPLE_DATA_SRC			= 1U << 15,
	PERF_SAMPLE_IDENTIFIER			= 1U << 16,
	PERF_SAMPLE_TRANSACTION			= 1U << 17,
	PERF_SAMPLE_REGS_INTR			= 1U << 18,
	PERF_SAMPLE_PHYS_ADDR			= 1U << 19,
	PERF_SAMPLE_AUX				= 1U << 20,
	PERF_SAMPLE_CGROUP			= 1U << 21,
	PERF_SAMPLE_DATA_PAGE_SIZE		= 1U << 22,
	PERF_SAMPLE_CODE_PAGE_SIZE		= 1U << 23,
	PERF_SAMPLE_WEIGHT_STRUCT		= 1U << 24,

	PERF_SAMPLE_MAX				= 1U << 25,	/* non-ABI */
};

#define PERF_SAMPLE_WEIGHT_TYPE	(PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
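/*
 * Example (an illustrative sketch): a sampling event that records where and
 * when each overflow happened sets a combination of the bits above, e.g.:
 *
 *   attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *                        PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD;
 *   attr.sample_period = 100000;        hypothetical period, in events
 */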
/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
	PERF_SAMPLE_BRANCH_USER_SHIFT		= 0,  /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL_SHIFT		= 1,  /* kernel branches */
	PERF_SAMPLE_BRANCH_HV_SHIFT		= 2,  /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY_SHIFT		= 3,  /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT	= 4,  /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT	= 5,  /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL_SHIFT	= 6,  /* indirect calls */
	PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT	= 7,  /* transaction aborts */
	PERF_SAMPLE_BRANCH_IN_TX_SHIFT		= 8,  /* in transaction */
	PERF_SAMPLE_BRANCH_NO_TX_SHIFT		= 9,  /* not in transaction */
	PERF_SAMPLE_BRANCH_COND_SHIFT		= 10, /* conditional branches */

	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT	= 11, /* CALL/RET stack */
	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT	= 12, /* indirect jumps */
	PERF_SAMPLE_BRANCH_CALL_SHIFT		= 13, /* direct call */

	PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT	= 14, /* no flags */
	PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT	= 15, /* no cycles */

	PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT	= 16, /* save branch type */

	PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT	= 17, /* save low level index of raw branch records */

	PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT	= 18, /* save privilege mode */

	PERF_SAMPLE_BRANCH_COUNTERS_SHIFT	= 19, /* save occurrences of events on a branch */

	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
};

enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
	PERF_SAMPLE_BRANCH_HV		= 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

	PERF_SAMPLE_BRANCH_ANY		= 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
	PERF_SAMPLE_BRANCH_IN_TX	= 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
	PERF_SAMPLE_BRANCH_NO_TX	= 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
	PERF_SAMPLE_BRANCH_COND		= 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

	PERF_SAMPLE_BRANCH_CALL_STACK	= 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
	PERF_SAMPLE_BRANCH_IND_JUMP	= 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
	PERF_SAMPLE_BRANCH_CALL		= 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

	PERF_SAMPLE_BRANCH_NO_FLAGS	= 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
	PERF_SAMPLE_BRANCH_NO_CYCLES	= 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

	PERF_SAMPLE_BRANCH_TYPE_SAVE	= 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

	PERF_SAMPLE_BRANCH_HW_INDEX	= 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,

	PERF_SAMPLE_BRANCH_PRIV_SAVE	= 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,

	PERF_SAMPLE_BRANCH_COUNTERS	= 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,

	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
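/*
 * Example (an illustrative sketch): sampling any type of user-space branch
 * requests PERF_SAMPLE_BRANCH_STACK in sample_type and programs
 * branch_sample_type accordingly:
 *
 *   attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *   attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *                              PERF_SAMPLE_BRANCH_ANY;
 */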
/*
 * Common control flow change classifications:
 */
enum {
	PERF_BR_UNKNOWN		= 0,	/* Unknown */
	PERF_BR_COND		= 1,	/* Conditional */
	PERF_BR_UNCOND		= 2,	/* Unconditional */
	PERF_BR_IND		= 3,	/* Indirect */
	PERF_BR_CALL		= 4,	/* Function call */
	PERF_BR_IND_CALL	= 5,	/* Indirect function call */
	PERF_BR_RET		= 6,	/* Function return */
	PERF_BR_SYSCALL		= 7,	/* Syscall */
	PERF_BR_SYSRET		= 8,	/* Syscall return */
	PERF_BR_COND_CALL	= 9,	/* Conditional function call */
	PERF_BR_COND_RET	= 10,	/* Conditional function return */
	PERF_BR_ERET		= 11,	/* Exception return */
	PERF_BR_IRQ		= 12,	/* IRQ */
	PERF_BR_SERROR		= 13,	/* System error */
	PERF_BR_NO_TX		= 14,	/* Not in transaction */
	PERF_BR_EXTEND_ABI	= 15,	/* Extend ABI */
	PERF_BR_MAX,
};

/*
 * Common branch speculation outcome classifications:
 */
enum {
	PERF_BR_SPEC_NA			= 0,	/* Not available */
	PERF_BR_SPEC_WRONG_PATH		= 1,	/* Speculative but on wrong path */
	PERF_BR_NON_SPEC_CORRECT_PATH	= 2,	/* Non-speculative but on correct path */
	PERF_BR_SPEC_CORRECT_PATH	= 3,	/* Speculative and on correct path */
	PERF_BR_SPEC_MAX,
};

enum {
	PERF_BR_NEW_FAULT_ALGN		= 0,	/* Alignment fault */
	PERF_BR_NEW_FAULT_DATA		= 1,	/* Data fault */
	PERF_BR_NEW_FAULT_INST		= 2,	/* Inst fault */
	PERF_BR_NEW_ARCH_1		= 3,	/* Architecture specific */
	PERF_BR_NEW_ARCH_2		= 4,	/* Architecture specific */
	PERF_BR_NEW_ARCH_3		= 5,	/* Architecture specific */
	PERF_BR_NEW_ARCH_4		= 6,	/* Architecture specific */
	PERF_BR_NEW_ARCH_5		= 7,	/* Architecture specific */
	PERF_BR_NEW_MAX,
};

enum {
	PERF_BR_PRIV_UNKNOWN	= 0,
	PERF_BR_PRIV_USER	= 1,
	PERF_BR_PRIV_KERNEL	= 2,
	PERF_BR_PRIV_HV		= 3,
};

#define PERF_BR_ARM64_FIQ		PERF_BR_NEW_ARCH_1
#define PERF_BR_ARM64_DEBUG_HALT	PERF_BR_NEW_ARCH_2
#define PERF_BR_ARM64_DEBUG_EXIT	PERF_BR_NEW_ARCH_3
#define PERF_BR_ARM64_DEBUG_INST	PERF_BR_NEW_ARCH_4
#define PERF_BR_ARM64_DEBUG_DATA	PERF_BR_NEW_ARCH_5

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
	PERF_TXN_ELISION	= (1 << 0), /* From elision */
	PERF_TXN_TRANSACTION	= (1 << 1), /* From transaction */
	PERF_TXN_SYNC		= (1 << 2), /* Instruction is related */
	PERF_TXN_ASYNC		= (1 << 3), /* Instruction is not related */
	PERF_TXN_RETRY		= (1 << 4), /* Retry possible */
	PERF_TXN_CONFLICT	= (1 << 5), /* Conflict abort */
	PERF_TXN_CAPACITY_WRITE	= (1 << 6), /* Capacity write abort */
	PERF_TXN_CAPACITY_READ	= (1 << 7), /* Capacity read abort */

	PERF_TXN_MAX		= (1 << 8), /* non-ABI */

	/* Bits 32..63 are reserved for the abort code */

	PERF_TXN_ABORT_MASK	= (0xffffffffULL << 32),
	PERF_TXN_ABORT_SHIFT	= 32,
};
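/*
 * Example (an illustrative sketch): the abort code, when one is reported,
 * occupies the upper 32 bits of the PERF_SAMPLE_TRANSACTION word:
 *
 *   __u64 txn        = ...;     transaction word taken from a sample
 *   __u32 abort_code = (txn & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT;
 *   int   in_txn     = !!(txn & PERF_TXN_TRANSACTION);
 */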
/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	  { u64		lost;         } && PERF_FORMAT_LOST
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	    { u64	lost;         } && PERF_FORMAT_LOST
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
	PERF_FORMAT_ID			= 1U << 2,
	PERF_FORMAT_GROUP		= 1U << 3,
	PERF_FORMAT_LOST		= 1U << 4,

	PERF_FORMAT_MAX			= 1U << 5,	/* non-ABI */
};
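/*
 * Example (an illustrative sketch, assuming read_format is
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING and
 * PERF_FORMAT_GROUP is not set): scaling a potentially multiplexed
 * counter after a read() on the event fd:
 *
 *   struct {
 *           __u64 value;
 *           __u64 time_enabled;
 *           __u64 time_running;
 *   } rf;
 *
 *   if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *           estimate = rf.value * (double)rf.time_enabled / rf.time_running;
 */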
#define PERF_ATTR_SIZE_VER0	64	/* Size of first published 'struct perf_event_attr' */
#define PERF_ATTR_SIZE_VER1	72	/* Add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* Add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* Add: sample_regs_user */
					/* Add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4	104	/* Add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5	112	/* Add: aux_watermark */
#define PERF_ATTR_SIZE_VER6	120	/* Add: aux_sample_size */
#define PERF_ATTR_SIZE_VER7	128	/* Add: sig_data */
#define PERF_ATTR_SIZE_VER8	136	/* Add: config3 */

/*
 * 'struct perf_event_attr' contains various attributes that define
 * a performance event - most of them hardware related configuration
 * details, but also a lot of behavioral switches and values implemented
 * by the kernel.
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for forward/backwards compatibility.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled	:  1, /* off by default        */
				inherit		:  1, /* children inherit it   */
				pinned		:  1, /* must always be on PMU */
				exclusive	:  1, /* only group on PMU     */
				exclude_user	:  1, /* don't count user      */
				exclude_kernel	:  1, /* ditto kernel          */
				exclude_hv	:  1, /* ditto hypervisor      */
				exclude_idle	:  1, /* don't count when idle */
				mmap		:  1, /* include mmap data     */
				comm		:  1, /* include comm data     */
				freq		:  1, /* use freq, not period  */
				inherit_stat	:  1, /* per task counts       */
				enable_on_exec	:  1, /* next exec enables     */
				task		:  1, /* trace fork/exit       */
				watermark	:  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 * See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip	:  2, /* skid constraint       */
				mmap_data	:  1, /* non-exec mmap data    */
				sample_id_all	:  1, /* sample_type all events */

				exclude_host	:  1, /* don't count in host   */
				exclude_guest	:  1, /* don't count in guest  */

				exclude_callchain_kernel : 1, /* exclude kernel callchains */
				exclude_callchain_user   : 1, /* exclude user callchains */
				mmap2		:  1, /* include mmap with inode data */
				comm_exec	:  1, /* flag comm events that are due to an exec */
				use_clockid	:  1, /* use @clockid for time fields */
				context_switch	:  1, /* context switch data */
				write_backward	:  1, /* write ring buffer from end to beginning */
				namespaces	:  1, /* include namespaces data */
				ksymbol		:  1, /* include ksymbol events */
				bpf_event	:  1, /* include BPF events */
				aux_output	:  1, /* generate AUX records instead of events */
				cgroup		:  1, /* include cgroup events */
				text_poke	:  1, /* include text poke events */
				build_id	:  1, /* use build ID in mmap2 events */
				inherit_thread	:  1, /* children only inherit if cloned with CLONE_THREAD */
				remove_on_exec	:  1, /* event is removed from task on exec */
				sigtrap		:  1, /* send synchronous SIGTRAP on event */
				__reserved_1	: 26;

	union {
		__u32		wakeup_events;	  /* wake up every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		kprobe_func; /* for perf_kprobe */
		__u64		uprobe_path; /* for perf_uprobe */
		__u64		config1;     /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		kprobe_addr;  /* when kprobe_func == NULL */
		__u64		probe_offset; /* for perf_[k,u]probe */
		__u64		config2;      /* extension of config1 */
	};
	__u64			branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64			sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32			sample_stack_user;

	__s32			clockid;
	/*
	 * Defines set of regs to dump for each sample
	 * state captured on:
	 *  - precise = 0: PMU interrupt
	 *  - precise > 0: sampled instruction
	 *
	 * See asm/perf_regs.h for details.
	 */
	__u64			sample_regs_intr;

	/*
	 * Wakeup watermark for AUX area
	 */
	__u32			aux_watermark;

	/*
	 * Max number of frame pointers in a callchain, should be
	 * lower than /proc/sys/kernel/perf_event_max_stack.
	 *
	 * Max number of entries of branch stack should be lower
	 * than the hardware limit.
	 */
	__u16			sample_max_stack;

	__u16			__reserved_2;
	__u32			aux_sample_size;

	union {
		__u32		aux_action;
		struct {
			__u32	aux_start_paused : 1, /* start AUX area tracing paused */
				aux_pause        : 1, /* on overflow, pause AUX area tracing */
				aux_resume       : 1, /* on overflow, resume AUX area tracing */
				__reserved_3     : 29;
		};
	};

	/*
	 * User provided data if sigtrap=1, passed back to user via
	 * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
	 * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
	 * truncated accordingly on 32 bit architectures.
	 */
	__u64			sig_data;

	__u64			config3; /* extension of config2 */
};
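/*
 * Example (an illustrative sketch; SYS_perf_event_open comes from
 * <sys/syscall.h> and there is no glibc wrapper): opening a counter of
 * user-space instructions for the calling thread, on any CPU:
 *
 *   struct perf_event_attr attr = { 0 };
 *   int fd;
 *
 *   attr.type           = PERF_TYPE_HARDWARE;
 *   attr.size           = sizeof(attr);
 *   attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *   attr.disabled       = 1;
 *   attr.exclude_kernel = 1;
 *
 *   fd = syscall(SYS_perf_event_open, &attr,
 *                0,       pid: the calling process/thread
 *                -1,      cpu: any CPU
 *                -1,      group_fd: no event group
 *                PERF_FLAG_FD_CLOEXEC);
 */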
/*
 * Structure used by the PERF_EVENT_IOC_QUERY_BPF command below
 * to query BPF programs attached to the same perf tracepoint
 * as the given perf event.
 */
struct perf_event_query_bpf {
	/*
	 * The below ids array length.
	 */
	__u32	ids_len;
	/*
	 * Set by the kernel to indicate the number of
	 * available programs.
	 */
	__u32	prog_cnt;
	/*
	 * User provided buffer to store program ids.
	 */
	__u32	ids[];
};

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE			_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE			_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH			_IO ('$', 2)
#define PERF_EVENT_IOC_RESET			_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD			_IOW ('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT		_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER		_IOW ('$', 6, char *)
#define PERF_EVENT_IOC_ID			_IOR ('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF			_IOW ('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT		_IOW ('$', 9, __u32)
#define PERF_EVENT_IOC_QUERY_BPF		_IOWR('$', 10, struct perf_event_query_bpf *)
#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES	_IOW ('$', 11, struct perf_event_attr *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
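/*
 * Example (an illustrative sketch): a typical count-a-region pattern on an
 * event opened with attr.disabled = 1 uses the ioctls above:
 *
 *   __u64 count;
 *
 *   ioctl(fd, PERF_EVENT_IOC_RESET,  0);
 *   ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *   run_workload();                     hypothetical code under measurement
 *   ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *   read(fd, &count, sizeof(count));    layout depends on attr.read_format
 */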
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the HW events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, index, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier()
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_usr_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     index = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_user_rdpmc && index) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(index - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on CPU */
	union {
		__u64	capabilities;
		struct {
			__u64	cap_bit0		: 1, /* Always 0, deprecated, see commit 860f085b74e9 */
				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */

				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
				cap_user_time		: 1, /* The time_{shift,mult,offset} fields are used */
				cap_user_time_zero	: 1, /* The time_zero field is used */
				cap_user_time_short	: 1, /* the time_{cycle,mask} fields are used */
				cap_____res		: 58;
		};
	};

	/*
	 * If cap_user_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;

	/*
	 * If cap_usr_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using RDTSC or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot  = (cyc >> time_shift);
	 *   rem   = cyc & (((u64)1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *           ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if index), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (index)
	 *     running += delta;
	 *
	 *   quot  = count / running;
	 *   rem   = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;
	/*
	 * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
	 * from sample timestamps.
	 *
	 *   time = timestamp - time_zero;
	 *   quot = time / time_mult;
	 *   rem  = time % time_mult;
	 *   cyc  = (quot << time_shift) + (rem << time_shift) / time_mult;
	 *
	 * And vice versa:
	 *
	 *   quot      = cyc >> time_shift;
	 *   rem       = cyc & (((u64)1 << time_shift) - 1);
	 *   timestamp = time_zero + quot * time_mult +
	 *               ((rem * time_mult) >> time_shift);
	 */
	__u64	time_zero;

	__u32	size;			/* Header size up to __reserved[] fields. */
	__u32	__reserved_1;

	/*
	 * If cap_usr_time_short, the hardware clock is less than 64bit wide
	 * and we must compute the 'cyc' value, as used by cap_usr_time, as:
	 *
	 *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
	 *
	 * NOTE: this form is explicitly chosen such that cap_usr_time_short
	 *       is a correction on top of cap_usr_time, and code that doesn't
	 *       know about cap_usr_time_short still works under the assumption
	 *       the counter doesn't wrap.
	 */
	__u64	time_cycles;
	__u64	time_mask;

	/*
	 * Hole for extension of the self monitor capabilities
	 */
	__u8	__reserved[116*8];	/* align to 1k. */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an smp_rmb(),
	 * after reading this value.
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by user-space to reflect the last read data, after issuing
	 * an smp_mb() to separate the data read from the ->data_tail store.
	 * In this case the kernel will not over-write unread data.
	 *
	 * See perf_output_put_handle() for the data ordering.
	 *
	 * data_{offset,size} indicate the location and size of the perf record
	 * buffer within the mmapped area.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
	__u64	data_offset;		/* where the buffer starts */
	__u64	data_size;		/* data buffer size */

	/*
	 * AUX area is defined by aux_{offset,size} fields that should be set
	 * by the user-space, so that
	 *
	 *   aux_offset >= data_offset + data_size
	 *
	 * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
	 *
	 * Ring buffer pointers aux_{head,tail} have the same semantics as
	 * data_{head,tail} and same ordering rules apply.
	 */
	__u64	aux_head;
	__u64	aux_tail;
	__u64	aux_offset;
	__u64	aux_size;
};
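/*
 * Example (an illustrative sketch; smp_rmb()/smp_mb() stand for suitable
 * user-space barriers, and consume_record() is hypothetical — a real
 * consumer must also handle records that wrap around the buffer end):
 *
 *   char *base = (char *)pc + pc->data_offset;
 *   __u64 head, tail;
 *
 *   head = pc->data_head;
 *   smp_rmb();                  order record reads after the head read
 *
 *   for (tail = pc->data_tail; tail < head; )
 *           tail += consume_record(base + (tail % pc->data_size));
 *
 *   smp_mb();                   finish record reads before the tail store
 *   pc->data_tail = tail;
 */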
/*
 * The current state of perf_event_header::misc bits usage:
 * ('|' used bit, '-' unused bit)
 *
 *  012         CDEF
 *  |||---------||||
 *
 *  Where:
 *    0-2     CPUMODE_MASK
 *
 *    C       PROC_MAP_PARSE_TIMEOUT
 *    D       MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
 *    E       MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
 *    F       (reserved)
 */

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
/*
 * Following PERF_RECORD_MISC_* are used on different
 * events, so can reuse the same bit position:
 *
 *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
 *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
 *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
 *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
 */
#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
#define PERF_RECORD_MISC_FORK_EXEC		(1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
/*
 * These PERF_RECORD_MISC_* flags below are safely reused
 * for the following events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
 *
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that thread was preempted in TASK_RUNNING state.
 *
 * PERF_RECORD_MISC_MMAP_BUILD_ID:
 *   Indicates that mmap2 event carries build ID data.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
#define PERF_RECORD_MISC_MMAP_BUILD_ID		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

struct perf_ns_link_info {
	__u64	dev;
	__u64	ino;
};

enum {
	NET_NS_INDEX		= 0,
	UTS_NS_INDEX		= 1,
	IPC_NS_INDEX		= 2,
	PID_NS_INDEX		= 3,
	USER_NS_INDEX		= 4,
	MNT_NS_INDEX		= 5,
	CGROUP_NS_INDEX		= 6,

	NR_NAMESPACES,		/* number of available namespaces */
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER) described in PERF_RECORD_SAMPLE below. They will be
	 * stashed just after the perf_event_header and the fields already
	 * present for the existing record type, i.e. at the end of the
	 * payload. That way a newer perf.data file will be supported by
	 * older perf tools, with these new optional fields being ignored.
	 *
	 * struct sample_id {
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			id;       } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate user-space IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_READ			= 8,
	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	#
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *	#
	 *
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size];}&& PERF_SAMPLE_RAW
	 *
	 *	{ u64                   nr;
	 *	  { u64	hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
	 *	  { u64 from, to, flags } lbr[nr];
	 *	  #
	 *	  # The format of the counters is decided by the
	 *	  # "branch_counter_nr" and "branch_counter_width",
	 *	  # which are defined in the ABI.
	 *	  #
	 *	  { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
	 *	} && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64			size;
	 *	  char			data[size];
	 *	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ union perf_sample_weight
	 *	 {
	 *		u64		full; && PERF_SAMPLE_WEIGHT
	 *	#if defined(__LITTLE_ENDIAN_BITFIELD)
	 *		struct {
	 *			u32	var1_dw;
	 *			u16	var2_w;
	 *			u16	var3_w;
	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
	 *	#elif defined(__BIG_ENDIAN_BITFIELD)
	 *		struct {
	 *			u16	var3_w;
	 *			u16	var2_w;
	 *			u32	var1_dw;
	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
	 *	#endif
	 *	 }
	 *	}
	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
	 *	{ u64			phys_addr;} && PERF_SAMPLE_PHYS_ADDR
	 *	{ u64			cgroup;} && PERF_SAMPLE_CGROUP
	 *	{ u64			data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
	 *	{ u64			code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
	 *	{ u64			size;
	 *	  char			data[size]; } && PERF_SAMPLE_AUX
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	/*
	 * The MMAP2 records are an augmented version of MMAP; they add
	 * maj, min and ino numbers to uniquely identify each mapping.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	union {
	 *		struct {
	 *			u32		maj;
	 *			u32		min;
	 *			u64		ino;
	 *			u64		ino_generation;
	 *		};
	 *		struct {
	 *			u8		build_id_size;
	 *			u8		__reserved_1;
	 *			u16		__reserved_2;
	 *			u8		build_id[20];
	 *		};
	 *	};
	 *	u32				prot, flags;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2			= 10,
	/*
	 * Records that new data landed in the AUX buffer part.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				aux_offset;
	 *	u64				aux_size;
	 *	u64				flags;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_AUX				= 11,

	/*
	 * Indicates that instruction trace has started
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_ITRACE_START		= 12,

	/*
	 * Records the dropped/lost sample number.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST_SAMPLES		= 13,

	/*
	 * Records a context switch in or out (flagged by
	 * PERF_RECORD_MISC_SWITCH_OUT). See also
	 * PERF_RECORD_SWITCH_CPU_WIDE.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH			= 14,

	/*
	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
	 * next_prev_tid that are the next (switching out) or previous
	 * (switching in) pid/tid.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				next_prev_pid;
	 *	u32				next_prev_tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH_CPU_WIDE		= 15,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 *	u64				nr_namespaces;
	 *	{ u64				dev, inode; } [nr_namespaces];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_NAMESPACES			= 16,

	/*
	 * Record ksymbol register/unregister events:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				addr;
	 *	u32				len;
	 *	u16				ksym_type;
	 *	u16				flags;
	 *	char				name[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_KSYMBOL			= 17,

	/*
	 * Record BPF events:
	 *  enum perf_bpf_event_type {
	 *	PERF_BPF_EVENT_UNKNOWN		= 0,
	 *	PERF_BPF_EVENT_PROG_LOAD	= 1,
	 *	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
	 *  };
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u16				type;
	 *	u16				flags;
	 *	u32				id;
	 *	u8				tag[BPF_TAG_SIZE];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_BPF_EVENT			= 18,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	char				path[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_CGROUP			= 19,

	/*
	 * Records changes to kernel text i.e. self-modified code. 'old_len' is
	 * the number of old bytes, 'new_len' is the number of new bytes. Either
	 * 'old_len' or 'new_len' may be zero to indicate, for example, the
	 * addition or removal of a trampoline. 'bytes' contains the old bytes
	 * followed immediately by the new bytes.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				addr;
	 *	u16				old_len;
	 *	u16				new_len;
	 *	u8				bytes[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_TEXT_POKE			= 20,

	/*
	 * Data written to the AUX area by hardware due to aux_output, may need
	 * to be matched to the event by an architecture-specific hardware ID.
	 * This records the hardware ID, but requires sample_id to provide the
	 * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
	 * records from multiple events.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				hw_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_AUX_OUTPUT_HW_ID		= 21,

	PERF_RECORD_MAX,			/* non-ABI */
};
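/*
 * Example (an illustrative sketch; begin/end are hypothetical pointers into
 * an already-linearized copy of the byte stream): every record starts with
 * a perf_event_header, so a consumer can walk the stream generically:
 *
 *   struct perf_event_header *hdr;
 *
 *   for (hdr = begin; (void *)hdr < end;
 *        hdr = (void *)((char *)hdr + hdr->size)) {
 *           switch (hdr->type) {
 *           case PERF_RECORD_SAMPLE:
 *                   ...;        body layout depends on attr.sample_type
 *                   break;
 *           }
 *   }
 */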
enum perf_record_ksymbol_type {
	PERF_RECORD_KSYMBOL_TYPE_UNKNOWN	= 0,
	PERF_RECORD_KSYMBOL_TYPE_BPF		= 1,
	/*
	 * Out of line code such as kprobe-replaced instructions or optimized
	 * kprobes or ftrace trampolines.
	 */
	PERF_RECORD_KSYMBOL_TYPE_OOL		= 2,
	PERF_RECORD_KSYMBOL_TYPE_MAX		/* non-ABI */
};

#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER	(1 << 0)

enum perf_bpf_event_type {
	PERF_BPF_EVENT_UNKNOWN		= 0,
	PERF_BPF_EVENT_PROG_LOAD	= 1,
	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
	PERF_BPF_EVENT_MAX,		/* non-ABI */
};

#define PERF_MAX_STACK_DEPTH		127
#define PERF_MAX_CONTEXTS_PER_STACK	  8

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED			0x0001	/* Record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE			0x0002	/* Snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL			0x0004	/* Record contains gaps */
#define PERF_AUX_FLAG_COLLISION			0x0008	/* Sample collided with another */
#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK	0xff00	/* PMU specific trace format type */

/* CoreSight PMU AUX buffer formats */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT	0x0000 /* Default for backward compatibility */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW		0x0100 /* Raw format of the source */

#define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
#define PERF_FLAG_FD_OUTPUT		(1UL << 1)
#define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup ID, per-CPU mode only */
#define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64   mem_op      :  5, /* Type of opcode */
			mem_lvl     : 14, /* Memory hierarchy level */
			mem_snoop   :  5, /* Snoop mode */
			mem_lock    :  2, /* Lock instr */
			mem_dtlb    :  7, /* TLB access */
			mem_lvl_num :  4, /* Memory hierarchy level number */
			mem_remote  :  1, /* Remote */
			mem_snoopx  :  2, /* Snoop mode, ext */
			mem_blk     :  3, /* Access blocked */
			mem_hops    :  3, /* Hop level */
			mem_rsvd    : 18;
	};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_rsvd    : 18,
			mem_hops    :  3, /* Hop level */
			mem_blk     :  3, /* Access blocked */
			mem_snoopx  :  2, /* Snoop mode, ext */
			mem_remote  :  1, /* Remote */
			mem_lvl_num :  4, /* Memory hierarchy level number */
			mem_dtlb    :  7, /* TLB access */
			mem_lock    :  2, /* Lock instr */
			mem_snoop   :  5, /* Snoop mode */
			mem_lvl     : 14, /* Memory hierarchy level */
			mem_op      :  5; /* Type of opcode */
	};
};
#else
# error "Unknown endianness"
#endif
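/*
 * Example (an illustrative sketch): decoding a PERF_SAMPLE_DATA_SRC value
 * just reads the bit-fields of the union; mem_op fields are bitmasks while
 * mem_lvl_num is a plain value:
 *
 *   union perf_mem_data_src dsrc;
 *
 *   dsrc.val = ...;                     data_src word taken from a sample
 *
 *   if ((dsrc.mem_op & PERF_MEM_OP_LOAD) &&
 *       dsrc.mem_lvl_num == PERF_MEM_LVLNUM_L1)
 *           l1_loads++;                 hypothetical counter
 */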
/* Type of memory opcode: */
#define PERF_MEM_OP_NA		0x0001 /* Not available */
#define PERF_MEM_OP_LOAD	0x0002 /* Load instruction */
#define PERF_MEM_OP_STORE	0x0004 /* Store instruction */
#define PERF_MEM_OP_PFETCH	0x0008 /* Prefetch */
#define PERF_MEM_OP_EXEC	0x0010 /* Code (execution) */
#define PERF_MEM_OP_SHIFT	0

/*
 * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
 * favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
 * We support this namespace in order to not break defined ABIs.
 *
 * Memory hierarchy (memory level, hit or miss)
 */
#define PERF_MEM_LVL_NA		0x0001 /* Not available */
#define PERF_MEM_LVL_HIT	0x0002 /* Hit level */
#define PERF_MEM_LVL_MISS	0x0004 /* Miss level */
#define PERF_MEM_LVL_L1		0x0008 /* L1 */
#define PERF_MEM_LVL_LFB	0x0010 /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x0020 /* L2 */
#define PERF_MEM_LVL_L3		0x0040 /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x0080 /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x0100 /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x0200 /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x0400 /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x0800 /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

#define PERF_MEM_REMOTE_REMOTE	0x0001 /* Remote */
#define PERF_MEM_REMOTE_SHIFT	37

#define PERF_MEM_LVLNUM_L1	0x0001 /* L1 */
#define PERF_MEM_LVLNUM_L2	0x0002 /* L2 */
#define PERF_MEM_LVLNUM_L3	0x0003 /* L3 */
#define PERF_MEM_LVLNUM_L4	0x0004 /* L4 */
#define PERF_MEM_LVLNUM_L2_MHB	0x0005 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC	0x0006 /* Memory-side Cache */
/* 0x007 available */
#define PERF_MEM_LVLNUM_UNC	0x0008 /* Uncached */
#define PERF_MEM_LVLNUM_CXL	0x0009 /* CXL */
#define PERF_MEM_LVLNUM_IO	0x000a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x000b /* Any cache */
#define PERF_MEM_LVLNUM_LFB	0x000c /* LFB / L1 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_RAM	0x000d /* RAM */
#define PERF_MEM_LVLNUM_PMEM	0x000e /* PMEM */
#define PERF_MEM_LVLNUM_NA	0x000f /* N/A */

#define PERF_MEM_LVLNUM_SHIFT	33

/* Snoop mode */
#define PERF_MEM_SNOOP_NA	0x0001 /* Not available */
#define PERF_MEM_SNOOP_NONE	0x0002 /* No snoop */
#define PERF_MEM_SNOOP_HIT	0x0004 /* Snoop hit */
#define PERF_MEM_SNOOP_MISS	0x0008 /* Snoop miss */
#define PERF_MEM_SNOOP_HITM	0x0010 /* Snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

#define PERF_MEM_SNOOPX_FWD	0x0001 /* Forward */
#define PERF_MEM_SNOOPX_PEER	0x0002 /* Transfer from peer */
#define PERF_MEM_SNOOPX_SHIFT	38

/* Locked instruction */
#define PERF_MEM_LOCK_NA	0x0001 /* Not available */
#define PERF_MEM_LOCK_LOCKED	0x0002 /* Locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x0001 /* Not available */
#define PERF_MEM_TLB_HIT	0x0002 /* Hit level */
#define PERF_MEM_TLB_MISS	0x0004 /* Miss level */
#define PERF_MEM_TLB_L1		0x0008 /* L1 */
#define PERF_MEM_TLB_L2		0x0010 /* L2 */
#define PERF_MEM_TLB_WK		0x0020 /* Hardware walker */
#define PERF_MEM_TLB_OS		0x0040 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26

/* Access blocked */
#define PERF_MEM_BLK_NA		0x0001 /* Not available */
#define PERF_MEM_BLK_DATA	0x0002 /* Data could not be forwarded */
#define PERF_MEM_BLK_ADDR	0x0004 /* Address conflict */
#define PERF_MEM_BLK_SHIFT	40

/* Hop level */
#define PERF_MEM_HOPS_0		0x0001 /* Remote core, same node */
#define PERF_MEM_HOPS_1		0x0002 /* Remote node, same socket */
#define PERF_MEM_HOPS_2		0x0003 /* Remote socket, same board */
#define PERF_MEM_HOPS_3		0x0004 /* Remote board */
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT	43

#define PERF_MEM_S(a, s) \
	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
/*
 * Layout of single taken branch records:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional. If it is not supported,
 * mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 *      spec: branch speculation info (or 0 if not supported)
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred   :  1, /* target mispredicted */
		predicted :  1, /* target predicted */
		in_tx     :  1, /* in transaction */
		abort     :  1, /* transaction abort */
		cycles    : 16, /* cycle count to last branch */
		type      :  4, /* branch type */
		spec      :  2, /* branch speculation info */
		new_type  :  4, /* additional branch type */
		priv      :  3, /* privilege level */
		reserved  : 31;
};

/* Size of used info bits in struct perf_branch_entry */
#define PERF_BRANCH_ENTRY_INFO_BITS_MAX		33

union perf_sample_weight {
	__u64		full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	struct {
		__u32	var1_dw;
		__u16	var2_w;
		__u16	var3_w;
	};
#elif defined(__BIG_ENDIAN_BITFIELD)
	struct {
		__u16	var3_w;
		__u16	var2_w;
		__u32	var1_dw;
	};
#else
# error "Unknown endianness"
#endif
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */