1e2be04c7SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2607ca46eSDavid Howells /* 3607ca46eSDavid Howells * Performance events: 4607ca46eSDavid Howells * 5607ca46eSDavid Howells * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> 6607ca46eSDavid Howells * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar 7607ca46eSDavid Howells * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra 8607ca46eSDavid Howells * 9607ca46eSDavid Howells * Data type definitions, declarations, prototypes. 10607ca46eSDavid Howells * 11607ca46eSDavid Howells * Started by: Thomas Gleixner and Ingo Molnar 12607ca46eSDavid Howells * 13607ca46eSDavid Howells * For licencing details see kernel-base/COPYING 14607ca46eSDavid Howells */ 15607ca46eSDavid Howells #ifndef _UAPI_LINUX_PERF_EVENT_H 16607ca46eSDavid Howells #define _UAPI_LINUX_PERF_EVENT_H 17607ca46eSDavid Howells 18607ca46eSDavid Howells #include <linux/types.h> 19607ca46eSDavid Howells #include <linux/ioctl.h> 20607ca46eSDavid Howells #include <asm/byteorder.h> 21607ca46eSDavid Howells 22607ca46eSDavid Howells /* 23607ca46eSDavid Howells * User-space ABI bits: 24607ca46eSDavid Howells */ 25607ca46eSDavid Howells 26607ca46eSDavid Howells /* 27607ca46eSDavid Howells * attr.type 28607ca46eSDavid Howells */ 29607ca46eSDavid Howells enum perf_type_id { 30607ca46eSDavid Howells PERF_TYPE_HARDWARE = 0, 31607ca46eSDavid Howells PERF_TYPE_SOFTWARE = 1, 32607ca46eSDavid Howells PERF_TYPE_TRACEPOINT = 2, 33607ca46eSDavid Howells PERF_TYPE_HW_CACHE = 3, 34607ca46eSDavid Howells PERF_TYPE_RAW = 4, 35607ca46eSDavid Howells PERF_TYPE_BREAKPOINT = 5, 36607ca46eSDavid Howells 37607ca46eSDavid Howells PERF_TYPE_MAX, /* non-ABI */ 38607ca46eSDavid Howells }; 39607ca46eSDavid Howells 40607ca46eSDavid Howells /* 41607ca46eSDavid Howells * Generalized performance event event_id types, used by the 42607ca46eSDavid Howells * attr.event_id parameter of the sys_perf_event_open() 43607ca46eSDavid Howells * 
syscall: 44607ca46eSDavid Howells */ 45607ca46eSDavid Howells enum perf_hw_id { 46607ca46eSDavid Howells /* 47607ca46eSDavid Howells * Common hardware events, generalized by the kernel: 48607ca46eSDavid Howells */ 49607ca46eSDavid Howells PERF_COUNT_HW_CPU_CYCLES = 0, 50607ca46eSDavid Howells PERF_COUNT_HW_INSTRUCTIONS = 1, 51607ca46eSDavid Howells PERF_COUNT_HW_CACHE_REFERENCES = 2, 52607ca46eSDavid Howells PERF_COUNT_HW_CACHE_MISSES = 3, 53607ca46eSDavid Howells PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, 54607ca46eSDavid Howells PERF_COUNT_HW_BRANCH_MISSES = 5, 55607ca46eSDavid Howells PERF_COUNT_HW_BUS_CYCLES = 6, 56607ca46eSDavid Howells PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, 57607ca46eSDavid Howells PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, 58607ca46eSDavid Howells PERF_COUNT_HW_REF_CPU_CYCLES = 9, 59607ca46eSDavid Howells 60607ca46eSDavid Howells PERF_COUNT_HW_MAX, /* non-ABI */ 61607ca46eSDavid Howells }; 62607ca46eSDavid Howells 63607ca46eSDavid Howells /* 64607ca46eSDavid Howells * Generalized hardware cache events: 65607ca46eSDavid Howells * 66607ca46eSDavid Howells * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x 67607ca46eSDavid Howells * { read, write, prefetch } x 68607ca46eSDavid Howells * { accesses, misses } 69607ca46eSDavid Howells */ 70607ca46eSDavid Howells enum perf_hw_cache_id { 71607ca46eSDavid Howells PERF_COUNT_HW_CACHE_L1D = 0, 72607ca46eSDavid Howells PERF_COUNT_HW_CACHE_L1I = 1, 73607ca46eSDavid Howells PERF_COUNT_HW_CACHE_LL = 2, 74607ca46eSDavid Howells PERF_COUNT_HW_CACHE_DTLB = 3, 75607ca46eSDavid Howells PERF_COUNT_HW_CACHE_ITLB = 4, 76607ca46eSDavid Howells PERF_COUNT_HW_CACHE_BPU = 5, 77607ca46eSDavid Howells PERF_COUNT_HW_CACHE_NODE = 6, 78607ca46eSDavid Howells 79607ca46eSDavid Howells PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ 80607ca46eSDavid Howells }; 81607ca46eSDavid Howells 82607ca46eSDavid Howells enum perf_hw_cache_op_id { 83607ca46eSDavid Howells PERF_COUNT_HW_CACHE_OP_READ = 0, 84607ca46eSDavid Howells 
PERF_COUNT_HW_CACHE_OP_WRITE = 1, 85607ca46eSDavid Howells PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, 86607ca46eSDavid Howells 87607ca46eSDavid Howells PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ 88607ca46eSDavid Howells }; 89607ca46eSDavid Howells 90607ca46eSDavid Howells enum perf_hw_cache_op_result_id { 91607ca46eSDavid Howells PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, 92607ca46eSDavid Howells PERF_COUNT_HW_CACHE_RESULT_MISS = 1, 93607ca46eSDavid Howells 94607ca46eSDavid Howells PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ 95607ca46eSDavid Howells }; 96607ca46eSDavid Howells 97607ca46eSDavid Howells /* 98607ca46eSDavid Howells * Special "software" events provided by the kernel, even if the hardware 99607ca46eSDavid Howells * does not support performance events. These events measure various 100607ca46eSDavid Howells * physical and sw events of the kernel (and allow the profiling of them as 101607ca46eSDavid Howells * well): 102607ca46eSDavid Howells */ 103607ca46eSDavid Howells enum perf_sw_ids { 104607ca46eSDavid Howells PERF_COUNT_SW_CPU_CLOCK = 0, 105607ca46eSDavid Howells PERF_COUNT_SW_TASK_CLOCK = 1, 106607ca46eSDavid Howells PERF_COUNT_SW_PAGE_FAULTS = 2, 107607ca46eSDavid Howells PERF_COUNT_SW_CONTEXT_SWITCHES = 3, 108607ca46eSDavid Howells PERF_COUNT_SW_CPU_MIGRATIONS = 4, 109607ca46eSDavid Howells PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, 110607ca46eSDavid Howells PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, 111607ca46eSDavid Howells PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, 112607ca46eSDavid Howells PERF_COUNT_SW_EMULATION_FAULTS = 8, 113fa0097eeSAdrian Hunter PERF_COUNT_SW_DUMMY = 9, 114a43eec30SAlexei Starovoitov PERF_COUNT_SW_BPF_OUTPUT = 10, 115607ca46eSDavid Howells 116607ca46eSDavid Howells PERF_COUNT_SW_MAX, /* non-ABI */ 117607ca46eSDavid Howells }; 118607ca46eSDavid Howells 119607ca46eSDavid Howells /* 120607ca46eSDavid Howells * Bits that can be set in attr.sample_type to request information 121607ca46eSDavid Howells * in the overflow packets. 
122607ca46eSDavid Howells */ 123607ca46eSDavid Howells enum perf_event_sample_format { 124607ca46eSDavid Howells PERF_SAMPLE_IP = 1U << 0, 125607ca46eSDavid Howells PERF_SAMPLE_TID = 1U << 1, 126607ca46eSDavid Howells PERF_SAMPLE_TIME = 1U << 2, 127607ca46eSDavid Howells PERF_SAMPLE_ADDR = 1U << 3, 128607ca46eSDavid Howells PERF_SAMPLE_READ = 1U << 4, 129607ca46eSDavid Howells PERF_SAMPLE_CALLCHAIN = 1U << 5, 130607ca46eSDavid Howells PERF_SAMPLE_ID = 1U << 6, 131607ca46eSDavid Howells PERF_SAMPLE_CPU = 1U << 7, 132607ca46eSDavid Howells PERF_SAMPLE_PERIOD = 1U << 8, 133607ca46eSDavid Howells PERF_SAMPLE_STREAM_ID = 1U << 9, 134607ca46eSDavid Howells PERF_SAMPLE_RAW = 1U << 10, 135607ca46eSDavid Howells PERF_SAMPLE_BRANCH_STACK = 1U << 11, 136607ca46eSDavid Howells PERF_SAMPLE_REGS_USER = 1U << 12, 137607ca46eSDavid Howells PERF_SAMPLE_STACK_USER = 1U << 13, 138c3feedf2SAndi Kleen PERF_SAMPLE_WEIGHT = 1U << 14, 139d6be9ad6SStephane Eranian PERF_SAMPLE_DATA_SRC = 1U << 15, 140ff3d527cSAdrian Hunter PERF_SAMPLE_IDENTIFIER = 1U << 16, 141fdfbbd07SAndi Kleen PERF_SAMPLE_TRANSACTION = 1U << 17, 14260e2364eSStephane Eranian PERF_SAMPLE_REGS_INTR = 1U << 18, 143fc7ce9c7SKan Liang PERF_SAMPLE_PHYS_ADDR = 1U << 19, 144607ca46eSDavid Howells 145fc7ce9c7SKan Liang PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ 1466cbc304fSPeter Zijlstra 14709121255SPeter Zijlstra __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */ 148607ca46eSDavid Howells }; 149607ca46eSDavid Howells 150607ca46eSDavid Howells /* 151607ca46eSDavid Howells * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set 152607ca46eSDavid Howells * 153607ca46eSDavid Howells * If the user does not pass priv level information via branch_sample_type, 154607ca46eSDavid Howells * the kernel uses the event's priv level. Branch and event priv levels do 155607ca46eSDavid Howells * not have to match. Branch priv level is checked for permissions. 
156607ca46eSDavid Howells * 157607ca46eSDavid Howells * The branch types can be combined, however BRANCH_ANY covers all types 158607ca46eSDavid Howells * of branches and therefore it supersedes all the other types. 159607ca46eSDavid Howells */ 16027ac905bSYan, Zheng enum perf_branch_sample_type_shift { 16127ac905bSYan, Zheng PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ 16227ac905bSYan, Zheng PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ 16327ac905bSYan, Zheng PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ 16427ac905bSYan, Zheng 16527ac905bSYan, Zheng PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ 16627ac905bSYan, Zheng PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ 16727ac905bSYan, Zheng PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ 16827ac905bSYan, Zheng PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ 16927ac905bSYan, Zheng PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ 17027ac905bSYan, Zheng PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ 17127ac905bSYan, Zheng PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ 17227ac905bSYan, Zheng PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */ 17327ac905bSYan, Zheng 1742c44b193SPeter Zijlstra PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ 175c9fdfa14SStephane Eranian PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ 176c229bf9dSStephane Eranian PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */ 1772c44b193SPeter Zijlstra 178b16a5b52SAndi Kleen PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */ 179b16a5b52SAndi Kleen PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */ 180b16a5b52SAndi Kleen 181eb0baf8aSJin Yao PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */ 182eb0baf8aSJin Yao 18327ac905bSYan, Zheng PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ 18427ac905bSYan, Zheng }; 18527ac905bSYan, Zheng 186607ca46eSDavid Howells enum 
perf_branch_sample_type { 18727ac905bSYan, Zheng PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, 18827ac905bSYan, Zheng PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT, 18927ac905bSYan, Zheng PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, 190607ca46eSDavid Howells 19127ac905bSYan, Zheng PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, 1922c44b193SPeter Zijlstra PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, 1932c44b193SPeter Zijlstra PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, 1942c44b193SPeter Zijlstra PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, 1952c44b193SPeter Zijlstra PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, 19627ac905bSYan, Zheng PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, 19727ac905bSYan, Zheng PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, 19827ac905bSYan, Zheng PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, 199607ca46eSDavid Howells 2002c44b193SPeter Zijlstra PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, 201c9fdfa14SStephane Eranian PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, 202c229bf9dSStephane Eranian PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, 2032c44b193SPeter Zijlstra 204b16a5b52SAndi Kleen PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, 205b16a5b52SAndi Kleen PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, 206b16a5b52SAndi Kleen 207eb0baf8aSJin Yao PERF_SAMPLE_BRANCH_TYPE_SAVE = 208eb0baf8aSJin Yao 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, 209eb0baf8aSJin Yao 21027ac905bSYan, Zheng PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, 211607ca46eSDavid Howells }; 212607ca46eSDavid Howells 213eb0baf8aSJin Yao /* 214eb0baf8aSJin Yao * Common flow change classification 215eb0baf8aSJin Yao */ 
216eb0baf8aSJin Yao enum { 217eb0baf8aSJin Yao PERF_BR_UNKNOWN = 0, /* unknown */ 218eb0baf8aSJin Yao PERF_BR_COND = 1, /* conditional */ 219eb0baf8aSJin Yao PERF_BR_UNCOND = 2, /* unconditional */ 220eb0baf8aSJin Yao PERF_BR_IND = 3, /* indirect */ 221eb0baf8aSJin Yao PERF_BR_CALL = 4, /* function call */ 222eb0baf8aSJin Yao PERF_BR_IND_CALL = 5, /* indirect function call */ 223eb0baf8aSJin Yao PERF_BR_RET = 6, /* function return */ 224eb0baf8aSJin Yao PERF_BR_SYSCALL = 7, /* syscall */ 225eb0baf8aSJin Yao PERF_BR_SYSRET = 8, /* syscall return */ 226eb0baf8aSJin Yao PERF_BR_COND_CALL = 9, /* conditional function call */ 227eb0baf8aSJin Yao PERF_BR_COND_RET = 10, /* conditional function return */ 228eb0baf8aSJin Yao PERF_BR_MAX, 229eb0baf8aSJin Yao }; 230eb0baf8aSJin Yao 231607ca46eSDavid Howells #define PERF_SAMPLE_BRANCH_PLM_ALL \ 232607ca46eSDavid Howells (PERF_SAMPLE_BRANCH_USER|\ 233607ca46eSDavid Howells PERF_SAMPLE_BRANCH_KERNEL|\ 234607ca46eSDavid Howells PERF_SAMPLE_BRANCH_HV) 235607ca46eSDavid Howells 236607ca46eSDavid Howells /* 237607ca46eSDavid Howells * Values to determine ABI of the registers dump. 238607ca46eSDavid Howells */ 239607ca46eSDavid Howells enum perf_sample_regs_abi { 240607ca46eSDavid Howells PERF_SAMPLE_REGS_ABI_NONE = 0, 241607ca46eSDavid Howells PERF_SAMPLE_REGS_ABI_32 = 1, 242607ca46eSDavid Howells PERF_SAMPLE_REGS_ABI_64 = 2, 243607ca46eSDavid Howells }; 244607ca46eSDavid Howells 245607ca46eSDavid Howells /* 246fdfbbd07SAndi Kleen * Values for the memory transaction event qualifier, mostly for 247fdfbbd07SAndi Kleen * abort events. Multiple bits can be set. 
248fdfbbd07SAndi Kleen */ 249fdfbbd07SAndi Kleen enum { 250fdfbbd07SAndi Kleen PERF_TXN_ELISION = (1 << 0), /* From elision */ 251fdfbbd07SAndi Kleen PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ 252fdfbbd07SAndi Kleen PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ 253fdfbbd07SAndi Kleen PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */ 254fdfbbd07SAndi Kleen PERF_TXN_RETRY = (1 << 4), /* Retry possible */ 255fdfbbd07SAndi Kleen PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ 256fdfbbd07SAndi Kleen PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ 257fdfbbd07SAndi Kleen PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ 258fdfbbd07SAndi Kleen 259fdfbbd07SAndi Kleen PERF_TXN_MAX = (1 << 8), /* non-ABI */ 260fdfbbd07SAndi Kleen 261fdfbbd07SAndi Kleen /* bits 32..63 are reserved for the abort code */ 262fdfbbd07SAndi Kleen 263fdfbbd07SAndi Kleen PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), 264fdfbbd07SAndi Kleen PERF_TXN_ABORT_SHIFT = 32, 265fdfbbd07SAndi Kleen }; 266fdfbbd07SAndi Kleen 267fdfbbd07SAndi Kleen /* 268607ca46eSDavid Howells * The format of the data returned by read() on a perf event fd, 269607ca46eSDavid Howells * as specified by attr.read_format: 270607ca46eSDavid Howells * 271607ca46eSDavid Howells * struct read_format { 272607ca46eSDavid Howells * { u64 value; 273607ca46eSDavid Howells * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED 274607ca46eSDavid Howells * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING 275607ca46eSDavid Howells * { u64 id; } && PERF_FORMAT_ID 276607ca46eSDavid Howells * } && !PERF_FORMAT_GROUP 277607ca46eSDavid Howells * 278607ca46eSDavid Howells * { u64 nr; 279607ca46eSDavid Howells * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED 280607ca46eSDavid Howells * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING 281607ca46eSDavid Howells * { u64 value; 282607ca46eSDavid Howells * { u64 id; } && PERF_FORMAT_ID 283607ca46eSDavid Howells * } cntr[nr]; 
284607ca46eSDavid Howells * } && PERF_FORMAT_GROUP 285607ca46eSDavid Howells * }; 286607ca46eSDavid Howells */ 287607ca46eSDavid Howells enum perf_event_read_format { 288607ca46eSDavid Howells PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, 289607ca46eSDavid Howells PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, 290607ca46eSDavid Howells PERF_FORMAT_ID = 1U << 2, 291607ca46eSDavid Howells PERF_FORMAT_GROUP = 1U << 3, 292607ca46eSDavid Howells 293607ca46eSDavid Howells PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ 294607ca46eSDavid Howells }; 295607ca46eSDavid Howells 296607ca46eSDavid Howells #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ 297607ca46eSDavid Howells #define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ 298607ca46eSDavid Howells #define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ 299607ca46eSDavid Howells #define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */ 300607ca46eSDavid Howells /* add: sample_stack_user */ 30160e2364eSStephane Eranian #define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */ 3021a594131SAlexander Shishkin #define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ 303607ca46eSDavid Howells 304607ca46eSDavid Howells /* 305607ca46eSDavid Howells * Hardware event_id to monitor via a performance monitoring event: 30697c79a38SArnaldo Carvalho de Melo * 30797c79a38SArnaldo Carvalho de Melo * @sample_max_stack: Max number of frame pointers in a callchain, 30897c79a38SArnaldo Carvalho de Melo * should be < /proc/sys/kernel/perf_event_max_stack 309607ca46eSDavid Howells */ 310607ca46eSDavid Howells struct perf_event_attr { 311607ca46eSDavid Howells 312607ca46eSDavid Howells /* 313607ca46eSDavid Howells * Major type: hardware/software/tracepoint/etc. 314607ca46eSDavid Howells */ 315607ca46eSDavid Howells __u32 type; 316607ca46eSDavid Howells 317607ca46eSDavid Howells /* 318607ca46eSDavid Howells * Size of the attr structure, for fwd/bwd compat. 
319607ca46eSDavid Howells */ 320607ca46eSDavid Howells __u32 size; 321607ca46eSDavid Howells 322607ca46eSDavid Howells /* 323607ca46eSDavid Howells * Type specific configuration information. 324607ca46eSDavid Howells */ 325607ca46eSDavid Howells __u64 config; 326607ca46eSDavid Howells 327607ca46eSDavid Howells union { 328607ca46eSDavid Howells __u64 sample_period; 329607ca46eSDavid Howells __u64 sample_freq; 330607ca46eSDavid Howells }; 331607ca46eSDavid Howells 332607ca46eSDavid Howells __u64 sample_type; 333607ca46eSDavid Howells __u64 read_format; 334607ca46eSDavid Howells 335607ca46eSDavid Howells __u64 disabled : 1, /* off by default */ 336607ca46eSDavid Howells inherit : 1, /* children inherit it */ 337607ca46eSDavid Howells pinned : 1, /* must always be on PMU */ 338607ca46eSDavid Howells exclusive : 1, /* only group on PMU */ 339607ca46eSDavid Howells exclude_user : 1, /* don't count user */ 340607ca46eSDavid Howells exclude_kernel : 1, /* ditto kernel */ 341607ca46eSDavid Howells exclude_hv : 1, /* ditto hypervisor */ 342607ca46eSDavid Howells exclude_idle : 1, /* don't count when idle */ 343607ca46eSDavid Howells mmap : 1, /* include mmap data */ 344607ca46eSDavid Howells comm : 1, /* include comm data */ 345607ca46eSDavid Howells freq : 1, /* use freq, not period */ 346607ca46eSDavid Howells inherit_stat : 1, /* per task counts */ 347607ca46eSDavid Howells enable_on_exec : 1, /* next exec enables */ 348607ca46eSDavid Howells task : 1, /* trace fork/exit */ 349607ca46eSDavid Howells watermark : 1, /* wakeup_watermark */ 350607ca46eSDavid Howells /* 351607ca46eSDavid Howells * precise_ip: 352607ca46eSDavid Howells * 353607ca46eSDavid Howells * 0 - SAMPLE_IP can have arbitrary skid 354607ca46eSDavid Howells * 1 - SAMPLE_IP must have constant skid 355607ca46eSDavid Howells * 2 - SAMPLE_IP requested to have 0 skid 356607ca46eSDavid Howells * 3 - SAMPLE_IP must have 0 skid 357607ca46eSDavid Howells * 358607ca46eSDavid Howells * See also 
PERF_RECORD_MISC_EXACT_IP 359607ca46eSDavid Howells */ 360607ca46eSDavid Howells precise_ip : 2, /* skid constraint */ 361607ca46eSDavid Howells mmap_data : 1, /* non-exec mmap data */ 362607ca46eSDavid Howells sample_id_all : 1, /* sample_type all events */ 363607ca46eSDavid Howells 364607ca46eSDavid Howells exclude_host : 1, /* don't count in host */ 365607ca46eSDavid Howells exclude_guest : 1, /* don't count in guest */ 366607ca46eSDavid Howells 367607ca46eSDavid Howells exclude_callchain_kernel : 1, /* exclude kernel callchains */ 368607ca46eSDavid Howells exclude_callchain_user : 1, /* exclude user callchains */ 36913d7a241SStephane Eranian mmap2 : 1, /* include mmap with inode data */ 37082b89778SAdrian Hunter comm_exec : 1, /* flag comm events that are due to an exec */ 37134f43927SPeter Zijlstra use_clockid : 1, /* use @clockid for time fields */ 37245ac1403SAdrian Hunter context_switch : 1, /* context switch data */ 3739ecda41aSWang Nan write_backward : 1, /* Write ring buffer from end to beginning */ 374e4222673SHari Bathini namespaces : 1, /* include namespaces data */ 375e4222673SHari Bathini __reserved_1 : 35; 376607ca46eSDavid Howells 377607ca46eSDavid Howells union { 378607ca46eSDavid Howells __u32 wakeup_events; /* wakeup every n events */ 379607ca46eSDavid Howells __u32 wakeup_watermark; /* bytes before wakeup */ 380607ca46eSDavid Howells }; 381607ca46eSDavid Howells 382607ca46eSDavid Howells __u32 bp_type; 383607ca46eSDavid Howells union { 384607ca46eSDavid Howells __u64 bp_addr; 38565074d43SSong Liu __u64 kprobe_func; /* for perf_kprobe */ 38665074d43SSong Liu __u64 uprobe_path; /* for perf_uprobe */ 387607ca46eSDavid Howells __u64 config1; /* extension of config */ 388607ca46eSDavid Howells }; 389607ca46eSDavid Howells union { 390607ca46eSDavid Howells __u64 bp_len; 39165074d43SSong Liu __u64 kprobe_addr; /* when kprobe_func == NULL */ 39265074d43SSong Liu __u64 probe_offset; /* for perf_[k,u]probe */ 393607ca46eSDavid Howells __u64 config2; /* 
extension of config1 */ 394607ca46eSDavid Howells }; 395607ca46eSDavid Howells __u64 branch_sample_type; /* enum perf_branch_sample_type */ 396607ca46eSDavid Howells 397607ca46eSDavid Howells /* 398607ca46eSDavid Howells * Defines set of user regs to dump on samples. 399607ca46eSDavid Howells * See asm/perf_regs.h for details. 400607ca46eSDavid Howells */ 401607ca46eSDavid Howells __u64 sample_regs_user; 402607ca46eSDavid Howells 403607ca46eSDavid Howells /* 404607ca46eSDavid Howells * Defines size of the user stack to dump on samples. 405607ca46eSDavid Howells */ 406607ca46eSDavid Howells __u32 sample_stack_user; 407607ca46eSDavid Howells 40834f43927SPeter Zijlstra __s32 clockid; 40960e2364eSStephane Eranian /* 41060e2364eSStephane Eranian * Defines set of regs to dump for each sample 41160e2364eSStephane Eranian * state captured on: 41260e2364eSStephane Eranian * - precise = 0: PMU interrupt 41360e2364eSStephane Eranian * - precise > 0: sampled instruction 41460e2364eSStephane Eranian * 41560e2364eSStephane Eranian * See asm/perf_regs.h for details. 41660e2364eSStephane Eranian */ 41760e2364eSStephane Eranian __u64 sample_regs_intr; 4181a594131SAlexander Shishkin 4191a594131SAlexander Shishkin /* 4201a594131SAlexander Shishkin * Wakeup watermark for AUX area 4211a594131SAlexander Shishkin */ 4221a594131SAlexander Shishkin __u32 aux_watermark; 42397c79a38SArnaldo Carvalho de Melo __u16 sample_max_stack; 42497c79a38SArnaldo Carvalho de Melo __u16 __reserved_2; /* align to __u64 */ 425607ca46eSDavid Howells }; 426607ca46eSDavid Howells 427f371b304SYonghong Song /* 428f371b304SYonghong Song * Structure used by below PERF_EVENT_IOC_QUERY_BPF command 429f371b304SYonghong Song * to query bpf programs attached to the same perf tracepoint 430f371b304SYonghong Song * as the given perf event. 
431f371b304SYonghong Song */ 432f371b304SYonghong Song struct perf_event_query_bpf { 433f371b304SYonghong Song /* 434f371b304SYonghong Song * The below ids array length 435f371b304SYonghong Song */ 436f371b304SYonghong Song __u32 ids_len; 437f371b304SYonghong Song /* 438f371b304SYonghong Song * Set by the kernel to indicate the number of 439f371b304SYonghong Song * available programs 440f371b304SYonghong Song */ 441f371b304SYonghong Song __u32 prog_cnt; 442f371b304SYonghong Song /* 443f371b304SYonghong Song * User provided buffer to store program ids 444f371b304SYonghong Song */ 445f371b304SYonghong Song __u32 ids[0]; 446f371b304SYonghong Song }; 447f371b304SYonghong Song 448607ca46eSDavid Howells #define perf_flags(attr) (*(&(attr)->read_format + 1)) 449607ca46eSDavid Howells 450607ca46eSDavid Howells /* 451607ca46eSDavid Howells * Ioctls that can be done on a perf event fd: 452607ca46eSDavid Howells */ 453607ca46eSDavid Howells #define PERF_EVENT_IOC_ENABLE _IO ('$', 0) 454607ca46eSDavid Howells #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) 455607ca46eSDavid Howells #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) 456607ca46eSDavid Howells #define PERF_EVENT_IOC_RESET _IO ('$', 3) 457607ca46eSDavid Howells #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) 458607ca46eSDavid Howells #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) 459607ca46eSDavid Howells #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) 460a8e0108cSVince Weaver #define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) 4612541517cSAlexei Starovoitov #define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32) 46286e7972fSWang Nan #define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32) 463f371b304SYonghong Song #define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *) 46432ff77e8SMilind Chabbi #define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *) 465607ca46eSDavid Howells 466607ca46eSDavid Howells enum perf_event_ioc_flags { 467607ca46eSDavid Howells PERF_IOC_FLAG_GROUP = 1U 
<< 0, 468607ca46eSDavid Howells }; 469607ca46eSDavid Howells 470607ca46eSDavid Howells /* 471607ca46eSDavid Howells * Structure of the page that can be mapped via mmap 472607ca46eSDavid Howells */ 473607ca46eSDavid Howells struct perf_event_mmap_page { 474607ca46eSDavid Howells __u32 version; /* version number of this structure */ 475607ca46eSDavid Howells __u32 compat_version; /* lowest version this is compat with */ 476607ca46eSDavid Howells 477607ca46eSDavid Howells /* 478607ca46eSDavid Howells * Bits needed to read the hw events in user-space. 479607ca46eSDavid Howells * 480b438b1abSAndy Lutomirski * u32 seq, time_mult, time_shift, index, width; 481607ca46eSDavid Howells * u64 count, enabled, running; 482607ca46eSDavid Howells * u64 cyc, time_offset; 483607ca46eSDavid Howells * s64 pmc = 0; 484607ca46eSDavid Howells * 485607ca46eSDavid Howells * do { 486607ca46eSDavid Howells * seq = pc->lock; 487607ca46eSDavid Howells * barrier() 488607ca46eSDavid Howells * 489607ca46eSDavid Howells * enabled = pc->time_enabled; 490607ca46eSDavid Howells * running = pc->time_running; 491607ca46eSDavid Howells * 492607ca46eSDavid Howells * if (pc->cap_usr_time && enabled != running) { 493607ca46eSDavid Howells * cyc = rdtsc(); 494607ca46eSDavid Howells * time_offset = pc->time_offset; 495607ca46eSDavid Howells * time_mult = pc->time_mult; 496607ca46eSDavid Howells * time_shift = pc->time_shift; 497607ca46eSDavid Howells * } 498607ca46eSDavid Howells * 499b438b1abSAndy Lutomirski * index = pc->index; 500607ca46eSDavid Howells * count = pc->offset; 501b438b1abSAndy Lutomirski * if (pc->cap_user_rdpmc && index) { 502607ca46eSDavid Howells * width = pc->pmc_width; 503b438b1abSAndy Lutomirski * pmc = rdpmc(index - 1); 504607ca46eSDavid Howells * } 505607ca46eSDavid Howells * 506607ca46eSDavid Howells * barrier(); 507607ca46eSDavid Howells * } while (pc->lock != seq); 508607ca46eSDavid Howells * 509607ca46eSDavid Howells * NOTE: for obvious reason this only works on self-monitoring 
510607ca46eSDavid Howells * processes. 511607ca46eSDavid Howells */ 512607ca46eSDavid Howells __u32 lock; /* seqlock for synchronization */ 513607ca46eSDavid Howells __u32 index; /* hardware event identifier */ 514607ca46eSDavid Howells __s64 offset; /* add to hardware event value */ 515607ca46eSDavid Howells __u64 time_enabled; /* time event active */ 516607ca46eSDavid Howells __u64 time_running; /* time event on cpu */ 517607ca46eSDavid Howells union { 518607ca46eSDavid Howells __u64 capabilities; 519860f085bSAdrian Hunter struct { 520fa731587SPeter Zijlstra __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */ 521fa731587SPeter Zijlstra cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */ 522fa731587SPeter Zijlstra 523fa731587SPeter Zijlstra cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */ 524fa731587SPeter Zijlstra cap_user_time : 1, /* The time_* fields are used */ 525fa731587SPeter Zijlstra cap_user_time_zero : 1, /* The time_zero field is used */ 526fa731587SPeter Zijlstra cap_____res : 59; 527607ca46eSDavid Howells }; 528860f085bSAdrian Hunter }; 529607ca46eSDavid Howells 530607ca46eSDavid Howells /* 531b438b1abSAndy Lutomirski * If cap_user_rdpmc this field provides the bit-width of the value 532607ca46eSDavid Howells * read using the rdpmc() or equivalent instruction. This can be used 533607ca46eSDavid Howells * to sign extend the result like: 534607ca46eSDavid Howells * 535607ca46eSDavid Howells * pmc <<= 64 - width; 536607ca46eSDavid Howells * pmc >>= 64 - width; // signed shift right 537607ca46eSDavid Howells * count += pmc; 538607ca46eSDavid Howells */ 539607ca46eSDavid Howells __u16 pmc_width; 540607ca46eSDavid Howells 541607ca46eSDavid Howells /* 542607ca46eSDavid Howells * If cap_usr_time the below fields can be used to compute the time 543607ca46eSDavid Howells * delta since time_enabled (in ns) using rdtsc or similar. 
544607ca46eSDavid Howells * 545607ca46eSDavid Howells * u64 quot, rem; 546607ca46eSDavid Howells * u64 delta; 547607ca46eSDavid Howells * 548607ca46eSDavid Howells * quot = (cyc >> time_shift); 549b9511cd7SAdrian Hunter * rem = cyc & (((u64)1 << time_shift) - 1); 550607ca46eSDavid Howells * delta = time_offset + quot * time_mult + 551607ca46eSDavid Howells * ((rem * time_mult) >> time_shift); 552607ca46eSDavid Howells * 553607ca46eSDavid Howells * Where time_offset,time_mult,time_shift and cyc are read in the 554607ca46eSDavid Howells * seqcount loop described above. This delta can then be added to 555b438b1abSAndy Lutomirski * enabled and possible running (if index), improving the scaling: 556607ca46eSDavid Howells * 557607ca46eSDavid Howells * enabled += delta; 558b438b1abSAndy Lutomirski * if (index) 559607ca46eSDavid Howells * running += delta; 560607ca46eSDavid Howells * 561607ca46eSDavid Howells * quot = count / running; 562607ca46eSDavid Howells * rem = count % running; 563607ca46eSDavid Howells * count = quot * enabled + (rem * enabled) / running; 564607ca46eSDavid Howells */ 565607ca46eSDavid Howells __u16 time_shift; 566607ca46eSDavid Howells __u32 time_mult; 567607ca46eSDavid Howells __u64 time_offset; 568c73deb6aSAdrian Hunter /* 569c73deb6aSAdrian Hunter * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated 570c73deb6aSAdrian Hunter * from sample timestamps. 
571c73deb6aSAdrian Hunter * 572c73deb6aSAdrian Hunter * time = timestamp - time_zero; 573c73deb6aSAdrian Hunter * quot = time / time_mult; 574c73deb6aSAdrian Hunter * rem = time % time_mult; 575c73deb6aSAdrian Hunter * cyc = (quot << time_shift) + (rem << time_shift) / time_mult; 576c73deb6aSAdrian Hunter * 577c73deb6aSAdrian Hunter * And vice versa: 578c73deb6aSAdrian Hunter * 579c73deb6aSAdrian Hunter * quot = cyc >> time_shift; 580b9511cd7SAdrian Hunter * rem = cyc & (((u64)1 << time_shift) - 1); 581c73deb6aSAdrian Hunter * timestamp = time_zero + quot * time_mult + 582c73deb6aSAdrian Hunter * ((rem * time_mult) >> time_shift); 583c73deb6aSAdrian Hunter */ 584c73deb6aSAdrian Hunter __u64 time_zero; 585fa731587SPeter Zijlstra __u32 size; /* Header size up to __reserved[] fields. */ 586607ca46eSDavid Howells 587607ca46eSDavid Howells /* 588607ca46eSDavid Howells * Hole for extension of the self monitor capabilities 589607ca46eSDavid Howells */ 590607ca46eSDavid Howells 591fa731587SPeter Zijlstra __u8 __reserved[118*8+4]; /* align to 1k. */ 592607ca46eSDavid Howells 593607ca46eSDavid Howells /* 594607ca46eSDavid Howells * Control data for the mmap() data buffer. 595607ca46eSDavid Howells * 596bf378d34SPeter Zijlstra * User-space reading the @data_head value should issue an smp_rmb(), 597bf378d34SPeter Zijlstra * after reading this value. 598607ca46eSDavid Howells * 599607ca46eSDavid Howells * When the mapping is PROT_WRITE the @data_tail value should be 600bf378d34SPeter Zijlstra * written by userspace to reflect the last read data, after issueing 601bf378d34SPeter Zijlstra * an smp_mb() to separate the data read from the ->data_tail store. 602bf378d34SPeter Zijlstra * In this case the kernel will not over-write unread data. 603bf378d34SPeter Zijlstra * 604bf378d34SPeter Zijlstra * See perf_output_put_handle() for the data ordering. 
605e8c6deacSAlexander Shishkin * 606e8c6deacSAlexander Shishkin * data_{offset,size} indicate the location and size of the perf record 607e8c6deacSAlexander Shishkin * buffer within the mmapped area. 608607ca46eSDavid Howells */ 609607ca46eSDavid Howells __u64 data_head; /* head in the data section */ 610607ca46eSDavid Howells __u64 data_tail; /* user-space written tail */ 611e8c6deacSAlexander Shishkin __u64 data_offset; /* where the buffer starts */ 612e8c6deacSAlexander Shishkin __u64 data_size; /* data buffer size */ 61345bfb2e5SPeter Zijlstra 61445bfb2e5SPeter Zijlstra /* 61545bfb2e5SPeter Zijlstra * AUX area is defined by aux_{offset,size} fields that should be set 61645bfb2e5SPeter Zijlstra * by the userspace, so that 61745bfb2e5SPeter Zijlstra * 61845bfb2e5SPeter Zijlstra * aux_offset >= data_offset + data_size 61945bfb2e5SPeter Zijlstra * 62045bfb2e5SPeter Zijlstra * prior to mmap()ing it. Size of the mmap()ed area should be aux_size. 62145bfb2e5SPeter Zijlstra * 62245bfb2e5SPeter Zijlstra * Ring buffer pointers aux_{head,tail} have the same semantics as 62345bfb2e5SPeter Zijlstra * data_{head,tail} and same ordering rules apply. 
	 */
	__u64	aux_head;
	__u64	aux_tail;
	__u64	aux_offset;
	__u64	aux_size;
};

/*
 * perf_event_header::misc CPU-mode bits (low three bits): the privilege
 * mode the record/sample originates from.
 */
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
/*
 * Following PERF_RECORD_MISC_* are used on different
 * events, so can reuse the same bit position:
 *
 *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
 *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
 *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
 *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
 */
#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
#define PERF_RECORD_MISC_FORK_EXEC		(1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
/*
 * These PERF_RECORD_MISC_* flags below are safely reused
 * for the following events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that thread was preempted in TASK_RUNNING state.
 */
/*
 * These PERF_RECORD_MISC_* flags below are safely reused
 * for the following events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that thread was preempted in TASK_RUNNING state.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

/*
 * Every record in the mmap()ed ring buffer starts with this header;
 * @size is the size of the whole record, this header included.
 */
struct perf_event_header {
	__u32	type;	/* one of enum perf_event_type */
	__u16	misc;	/* PERF_RECORD_MISC_* flag bits */
	__u16	size;	/* total record size in bytes */
};

/* Device/inode pair identifying one namespace in PERF_RECORD_NAMESPACES. */
struct perf_ns_link_info {
	__u64	dev;
	__u64	ino;
};

/* Indexes into the per-namespace array of PERF_RECORD_NAMESPACES. */
enum {
	NET_NS_INDEX		= 0,
	UTS_NS_INDEX		= 1,
	IPC_NS_INDEX		= 2,
	PID_NS_INDEX		= 3,
	USER_NS_INDEX		= 4,
	MNT_NS_INDEX		= 5,
	CGROUP_NS_INDEX		= 6,

	NR_NAMESPACES,		/* number of available namespaces */
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
	 * just after the perf_event_header and the fields already present for
	 * the existing fields, i.e. at the end of the payload. That way a newer
	 * perf.data file will be supported by older perf tools, with these new
	 * optional fields being ignored.
	 *
	 * struct sample_id {
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			id;       } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.  The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	#
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *	#
	 *
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size];}&& PERF_SAMPLE_RAW
	 *
	 *	{ u64			nr;
	 *	  { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64			size;
	 *	  char			data[size];
	 *	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ u64			weight;   } && PERF_SAMPLE_WEIGHT
	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
	 *	{ u64			phys_addr;} && PERF_SAMPLE_PHYS_ADDR
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	/*
	 * The MMAP2 records are an augmented version of MMAP, they add
	 * maj, min, ino numbers to be used to uniquely identify each mapping
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	u32				maj;
	 *	u32				min;
	 *	u64				ino;
	 *	u64				ino_generation;
	 *	u32				prot, flags;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2			= 10,

	/*
	 * Records that new data landed in the AUX buffer part.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				aux_offset;
	 *	u64				aux_size;
	 *	u64				flags;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_AUX				= 11,

	/*
	 * Indicates that instruction trace has started
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_ITRACE_START		= 12,

	/*
	 * Records the dropped/lost sample number.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST_SAMPLES		= 13,

	/*
	 * Records a context switch in or out (flagged by
	 * PERF_RECORD_MISC_SWITCH_OUT). See also
	 * PERF_RECORD_SWITCH_CPU_WIDE.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH			= 14,

	/*
	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
	 * next_prev_tid that are the next (switching out) or previous
	 * (switching in) pid/tid.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				next_prev_pid;
	 *	u32				next_prev_tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH_CPU_WIDE		= 15,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 *	u64				nr_namespaces;
	 *	{ u64				dev, inode; } [nr_namespaces];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_NAMESPACES			= 16,

	PERF_RECORD_MAX,			/* non-ABI */
};

#define PERF_MAX_STACK_DEPTH		127
#define PERF_MAX_CONTEXTS_PER_STACK	  8

/* Sentinel values delimiting sections of a PERF_SAMPLE_CALLCHAIN. */
enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED		0x01	/* record was truncated to fit */
#define PERF_AUX_FLAG_TRUNCATED		0x01	/* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE		0x02	/* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL		0x04	/* record contains gaps */
#define PERF_AUX_FLAG_COLLISION		0x08	/* sample collided with another */

/* flags for the perf_event_open() syscall */
#define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
#define PERF_FLAG_FD_OUTPUT		(1UL << 1)
#define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */

/*
 * Decoded view of a PERF_SAMPLE_DATA_SRC value.  The bit-field order is
 * endian-dependent so that the in-memory __u64 layout is identical on
 * both byte orders.
 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64   mem_op:5,	/* type of opcode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_snoop:5,	/* snoop mode */
			mem_lock:2,	/* lock instr */
			mem_dtlb:7,	/* tlb access */
			mem_lvl_num:4,	/* memory hierarchy level number */
			mem_remote:1,   /* remote */
			mem_snoopx:2,	/* snoop mode, ext */
			mem_rsvd:24;
	};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_rsvd:24,
			mem_snoopx:2,	/* snoop mode, ext */
			mem_remote:1,   /* remote */
			mem_lvl_num:4,	/* memory hierarchy level number */
			mem_dtlb:7,	/* tlb access */
			mem_lock:2,	/* lock instr */
			mem_snoop:5,	/* snoop mode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_op:5;	/* type of opcode */
	};
};
#else
#error "Unknown endianness"
#endif

/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA		0x01 /* not available */
#define PERF_MEM_OP_LOAD	0x02 /* load instruction */
#define PERF_MEM_OP_STORE	0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
#define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT	0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA		0x01  /* not available */
#define PERF_MEM_LVL_HIT	0x02  /* hit level */
#define PERF_MEM_LVL_MISS	0x04  /* miss level  */
#define PERF_MEM_LVL_L1		0x08  /* L1 */
#define PERF_MEM_LVL_LFB	0x10  /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x20  /* L2 */
#define PERF_MEM_LVL_L3		0x40  /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x80  /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x100 /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x200 /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x400 /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x800 /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

#define PERF_MEM_REMOTE_REMOTE	0x01  /* Remote */
#define PERF_MEM_REMOTE_SHIFT	37

#define PERF_MEM_LVLNUM_L1	0x01 /* L1 */
#define PERF_MEM_LVLNUM_L2	0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3	0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4	0x04 /* L4 */
/* 5-0xa available */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB	0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM	0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM	0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA	0x0f /* N/A */

#define PERF_MEM_LVLNUM_SHIFT	33

/* snoop mode */
#define PERF_MEM_SNOOP_NA	0x01 /* not available */
#define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

#define PERF_MEM_SNOOPX_FWD	0x01 /* forward */
/* 1 free */
/*
 * mem_snoopx occupies bits 38-39: mem_op(5) + mem_lvl(14) + mem_snoop(5) +
 * mem_lock(2) + mem_dtlb(7) = bits 0-32, mem_lvl_num(4) = 33-36,
 * mem_remote(1) = 37.  The shift was previously mis-defined as 37, which
 * collided with PERF_MEM_REMOTE_SHIFT and pointed into the mem_remote bit.
 */
#define PERF_MEM_SNOOPX_SHIFT	38

/* locked instruction */
#define PERF_MEM_LOCK_NA	0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED	0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x01 /* not available */
#define PERF_MEM_TLB_HIT	0x02 /* hit level */
#define PERF_MEM_TLB_MISS	0x04 /* miss level */
#define PERF_MEM_TLB_L1		0x08 /* L1 */
#define PERF_MEM_TLB_L2		0x10 /* L2 */
#define PERF_MEM_TLB_WK		0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26

/* Compose a data_src field value: shift the field constant into place. */
#define PERF_MEM_S(a, s) \
	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 */
/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		in_tx:1,    /* in transaction */
		abort:1,    /* transaction abort */
		cycles:16,  /* cycle count to last branch */
		type:4,     /* branch type */
		reserved:40;
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */