/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 *
 * Everything in this header is part of the user-space ABI.  Enumerators
 * marked "non-ABI" are internal upper bounds and may change between
 * kernel versions; user space must not rely on their values.
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
 *
 * PERF_TYPE_HARDWARE:			0xEEEEEEEE000000AA
 *					AA: hardware event ID
 *					EEEEEEEE: PMU type ID
 * PERF_TYPE_HW_CACHE:			0xEEEEEEEE00DDCCBB
 *					BB: hardware cache ID
 *					CC: hardware cache op ID
 *					DD: hardware cache op result ID
 *					EEEEEEEE: PMU type ID
 * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
 */
/* Bit position of the PMU type ID within attr.config (upper 32 bits). */
#define PERF_PMU_TYPE_SHIFT		32
/* Mask selecting the generic hardware-event encoding (lower 32 bits). */
#define PERF_HW_EVENT_MASK		0xffffffff

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 * A cache event is the combination of one ID from each of the three
 * enums below, packed into successive bytes of attr.config:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
	PERF_COUNT_SW_DUMMY			= 9,
	PERF_COUNT_SW_BPF_OUTPUT		= 10,
	PERF_COUNT_SW_CGROUP_SWITCHES		= 11,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.  These are bit flags and may be OR-ed together.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
	PERF_SAMPLE_REGS_USER			= 1U << 12,
	PERF_SAMPLE_STACK_USER			= 1U << 13,
	PERF_SAMPLE_WEIGHT			= 1U << 14,
	PERF_SAMPLE_DATA_SRC			= 1U << 15,
	PERF_SAMPLE_IDENTIFIER			= 1U << 16,
	PERF_SAMPLE_TRANSACTION			= 1U << 17,
	PERF_SAMPLE_REGS_INTR			= 1U << 18,
	PERF_SAMPLE_PHYS_ADDR			= 1U << 19,
	PERF_SAMPLE_AUX				= 1U << 20,
	PERF_SAMPLE_CGROUP			= 1U << 21,
	PERF_SAMPLE_DATA_PAGE_SIZE		= 1U << 22,
	PERF_SAMPLE_CODE_PAGE_SIZE		= 1U << 23,
	PERF_SAMPLE_WEIGHT_STRUCT		= 1U << 24,

	PERF_SAMPLE_MAX				= 1U << 25,	/* non-ABI */

	__PERF_SAMPLE_CALLCHAIN_EARLY		= 1ULL << 63,	/* non-ABI; internal use */
};

/* Mask covering both weight sample formats (WEIGHT and WEIGHT_STRUCT). */
#define PERF_SAMPLE_WEIGHT_TYPE	(PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
	PERF_SAMPLE_BRANCH_USER_SHIFT		= 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL_SHIFT		= 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV_SHIFT		= 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY_SHIFT		= 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT	= 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT	= 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL_SHIFT	= 6, /* indirect calls */
	PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT	= 7, /* transaction aborts */
	PERF_SAMPLE_BRANCH_IN_TX_SHIFT		= 8, /* in transaction */
	PERF_SAMPLE_BRANCH_NO_TX_SHIFT		= 9, /* not in transaction */
	PERF_SAMPLE_BRANCH_COND_SHIFT		= 10, /* conditional branches */

	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT	= 11, /* call/ret stack */
	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT	= 12, /* indirect jumps */
	PERF_SAMPLE_BRANCH_CALL_SHIFT		= 13, /* direct call */

	PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT	= 14, /* no flags */
	PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT	= 15, /* no cycles */

	PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT	= 16, /* save branch type */

	PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT	= 17, /* save low level index of raw branch records */

	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
};

/*
 * Bit-mask forms of the shifts above; this is what actually goes into
 * attr.branch_sample_type.
 */
enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
	PERF_SAMPLE_BRANCH_HV		= 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

	PERF_SAMPLE_BRANCH_ANY		= 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
	PERF_SAMPLE_BRANCH_IN_TX	= 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
	PERF_SAMPLE_BRANCH_NO_TX	= 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
	PERF_SAMPLE_BRANCH_COND		= 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

	PERF_SAMPLE_BRANCH_CALL_STACK	= 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
	PERF_SAMPLE_BRANCH_IND_JUMP	= 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
	PERF_SAMPLE_BRANCH_CALL		= 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

	PERF_SAMPLE_BRANCH_NO_FLAGS	= 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
	PERF_SAMPLE_BRANCH_NO_CYCLES	= 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

	PERF_SAMPLE_BRANCH_TYPE_SAVE	=
		1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

	PERF_SAMPLE_BRANCH_HW_INDEX	= 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,

	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

/*
 * Common flow change classification
 */
enum {
	PERF_BR_UNKNOWN		= 0,	/* unknown */
	PERF_BR_COND		= 1,	/* conditional */
	PERF_BR_UNCOND		= 2,	/* unconditional  */
	PERF_BR_IND		= 3,	/* indirect */
	PERF_BR_CALL		= 4,	/* function call */
	PERF_BR_IND_CALL	= 5,	/* indirect function call */
	PERF_BR_RET		= 6,	/* function return */
	PERF_BR_SYSCALL		= 7,	/* syscall */
	PERF_BR_SYSRET		= 8,	/* syscall return */
	PERF_BR_COND_CALL	= 9,	/* conditional function call */
	PERF_BR_COND_RET	= 10,	/* conditional function return */
	PERF_BR_MAX,
};

/* Convenience mask: all three privilege-level selection bits. */
#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
	PERF_TXN_ELISION        = (1 << 0), /* From elision */
	PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
	PERF_TXN_SYNC           = (1 << 2), /* Instruction is related */
	PERF_TXN_ASYNC          = (1 << 3), /* Instruction not related */
	PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
	PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
	PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
	PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */

	PERF_TXN_MAX	        = (1 << 8), /* non-ABI */

	/* bits 32..63 are reserved for the abort code */

	PERF_TXN_ABORT_MASK  = (0xffffffffULL << 32),
	PERF_TXN_ABORT_SHIFT = 32,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
	PERF_FORMAT_ID			= 1U << 2,
	PERF_FORMAT_GROUP		= 1U << 3,

	PERF_FORMAT_MAX			= 1U << 4,	/* non-ABI */
};

/*
 * Historical sizes of struct perf_event_attr; attr.size is set to one of
 * these so the kernel can tell which fields user space knows about.
 */
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
					/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4	104	/* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5	112	/* add: aux_watermark */
#define PERF_ATTR_SIZE_VER6	120	/* add: aux_sample_size */
#define PERF_ATTR_SIZE_VER7	128	/* add: sig_data */
/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *		      should be < /proc/sys/kernel/perf_event_max_stack
 *
 * Passed to sys_perf_event_open(); @size must be set to one of the
 * PERF_ATTR_SIZE_VER* values for fwd/bwd compatibility.
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */
				sample_id_all  :  1, /* sample_type all events */

				exclude_host   :  1, /* don't count in host   */
				exclude_guest  :  1, /* don't count in guest  */

				exclude_callchain_kernel : 1, /* exclude kernel callchains */
				exclude_callchain_user   : 1, /* exclude user callchains */
				mmap2          :  1, /* include mmap with inode data     */
				comm_exec      :  1, /* flag comm events that are due to an exec */
				use_clockid    :  1, /* use @clockid for time fields */
				context_switch :  1, /* context switch data */
				write_backward :  1, /* Write ring buffer from end to beginning */
				namespaces     :  1, /* include namespaces data */
				ksymbol        :  1, /* include ksymbol events */
				bpf_event      :  1, /* include bpf events */
				aux_output     :  1, /* generate AUX records instead of events */
				cgroup         :  1, /* include cgroup events */
				text_poke      :  1, /* include text poke events */
				build_id       :  1, /* use build id in mmap2 events */
				inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
				remove_on_exec :  1, /* event is removed from task on exec */
				sigtrap        :  1, /* send synchronous SIGTRAP on event */
				__reserved_1   : 26;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		kprobe_func; /* for perf_kprobe */
		__u64		uprobe_path; /* for perf_uprobe */
		__u64		config1; /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		kprobe_addr; /* when kprobe_func == NULL */
		__u64		probe_offset; /* for perf_[k,u]probe */
		__u64		config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32	sample_stack_user;

	__s32	clockid;
	/*
	 * Defines set of regs to dump for each sample
	 * state captured on:
	 *  - precise = 0: PMU interrupt
	 *  - precise > 0: sampled instruction
	 *
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_intr;

	/*
	 * Wakeup watermark for AUX area
	 */
	__u32	aux_watermark;
	__u16	sample_max_stack;
	__u16	__reserved_2;
	__u32	aux_sample_size;
	__u32	__reserved_3;

	/*
	 * User provided data if sigtrap=1, passed back to user via
	 * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
	 * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
	 * truncated accordingly on 32 bit architectures.
	 */
	__u64	sig_data;
};
/*
 * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
 * to query bpf programs attached to the same perf tracepoint
 * as the given perf event.
 *
 * User space sets @ids_len to the capacity of @ids; the kernel fills
 * @prog_cnt and up to @ids_len program ids into @ids.
 */
struct perf_event_query_bpf {
	/*
	 * The below ids array length
	 */
	__u32	ids_len;
	/*
	 * Set by the kernel to indicate the number of
	 * available programs
	 */
	__u32	prog_cnt;
	/*
	 * User provided buffer to store program ids.
	 * C99 flexible array member (was the GNU zero-length
	 * array "ids[0]"; layout is identical, so this is ABI-safe).
	 */
	__u32	ids[];
};

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE			_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE			_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH			_IO ('$', 2)
#define PERF_EVENT_IOC_RESET			_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD			_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT		_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER		_IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID			_IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF			_IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT		_IOW('$', 9, __u32)
#define PERF_EVENT_IOC_QUERY_BPF		_IOWR('$', 10, struct perf_event_query_bpf *)
#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES	_IOW('$', 11, struct perf_event_attr *)

/* Flags for the ioctls above (arg), e.g. apply to the whole event group. */
enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
516607ca46eSDavid Howells * Structure of the page that can be mapped via mmap 517607ca46eSDavid Howells */ 518607ca46eSDavid Howells struct perf_event_mmap_page { 519607ca46eSDavid Howells __u32 version; /* version number of this structure */ 520607ca46eSDavid Howells __u32 compat_version; /* lowest version this is compat with */ 521607ca46eSDavid Howells 522607ca46eSDavid Howells /* 523607ca46eSDavid Howells * Bits needed to read the hw events in user-space. 524607ca46eSDavid Howells * 525b438b1abSAndy Lutomirski * u32 seq, time_mult, time_shift, index, width; 526607ca46eSDavid Howells * u64 count, enabled, running; 527607ca46eSDavid Howells * u64 cyc, time_offset; 528607ca46eSDavid Howells * s64 pmc = 0; 529607ca46eSDavid Howells * 530607ca46eSDavid Howells * do { 531607ca46eSDavid Howells * seq = pc->lock; 532607ca46eSDavid Howells * barrier() 533607ca46eSDavid Howells * 534607ca46eSDavid Howells * enabled = pc->time_enabled; 535607ca46eSDavid Howells * running = pc->time_running; 536607ca46eSDavid Howells * 537607ca46eSDavid Howells * if (pc->cap_usr_time && enabled != running) { 538607ca46eSDavid Howells * cyc = rdtsc(); 539607ca46eSDavid Howells * time_offset = pc->time_offset; 540607ca46eSDavid Howells * time_mult = pc->time_mult; 541607ca46eSDavid Howells * time_shift = pc->time_shift; 542607ca46eSDavid Howells * } 543607ca46eSDavid Howells * 544b438b1abSAndy Lutomirski * index = pc->index; 545607ca46eSDavid Howells * count = pc->offset; 546b438b1abSAndy Lutomirski * if (pc->cap_user_rdpmc && index) { 547607ca46eSDavid Howells * width = pc->pmc_width; 548b438b1abSAndy Lutomirski * pmc = rdpmc(index - 1); 549607ca46eSDavid Howells * } 550607ca46eSDavid Howells * 551607ca46eSDavid Howells * barrier(); 552607ca46eSDavid Howells * } while (pc->lock != seq); 553607ca46eSDavid Howells * 554607ca46eSDavid Howells * NOTE: for obvious reason this only works on self-monitoring 555607ca46eSDavid Howells * processes. 
556607ca46eSDavid Howells */ 557607ca46eSDavid Howells __u32 lock; /* seqlock for synchronization */ 558607ca46eSDavid Howells __u32 index; /* hardware event identifier */ 559607ca46eSDavid Howells __s64 offset; /* add to hardware event value */ 560607ca46eSDavid Howells __u64 time_enabled; /* time event active */ 561607ca46eSDavid Howells __u64 time_running; /* time event on cpu */ 562607ca46eSDavid Howells union { 563607ca46eSDavid Howells __u64 capabilities; 564860f085bSAdrian Hunter struct { 565fa731587SPeter Zijlstra __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */ 566fa731587SPeter Zijlstra cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */ 567fa731587SPeter Zijlstra 568fa731587SPeter Zijlstra cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */ 5696c0246a4SPeter Zijlstra cap_user_time : 1, /* The time_{shift,mult,offset} fields are used */ 570fa731587SPeter Zijlstra cap_user_time_zero : 1, /* The time_zero field is used */ 5716c0246a4SPeter Zijlstra cap_user_time_short : 1, /* the time_{cycle,mask} fields are used */ 5726c0246a4SPeter Zijlstra cap_____res : 58; 573607ca46eSDavid Howells }; 574860f085bSAdrian Hunter }; 575607ca46eSDavid Howells 576607ca46eSDavid Howells /* 577b438b1abSAndy Lutomirski * If cap_user_rdpmc this field provides the bit-width of the value 578607ca46eSDavid Howells * read using the rdpmc() or equivalent instruction. 
This can be used 579607ca46eSDavid Howells * to sign extend the result like: 580607ca46eSDavid Howells * 581607ca46eSDavid Howells * pmc <<= 64 - width; 582607ca46eSDavid Howells * pmc >>= 64 - width; // signed shift right 583607ca46eSDavid Howells * count += pmc; 584607ca46eSDavid Howells */ 585607ca46eSDavid Howells __u16 pmc_width; 586607ca46eSDavid Howells 587607ca46eSDavid Howells /* 588607ca46eSDavid Howells * If cap_usr_time the below fields can be used to compute the time 589607ca46eSDavid Howells * delta since time_enabled (in ns) using rdtsc or similar. 590607ca46eSDavid Howells * 591607ca46eSDavid Howells * u64 quot, rem; 592607ca46eSDavid Howells * u64 delta; 593607ca46eSDavid Howells * 594607ca46eSDavid Howells * quot = (cyc >> time_shift); 595b9511cd7SAdrian Hunter * rem = cyc & (((u64)1 << time_shift) - 1); 596607ca46eSDavid Howells * delta = time_offset + quot * time_mult + 597607ca46eSDavid Howells * ((rem * time_mult) >> time_shift); 598607ca46eSDavid Howells * 599607ca46eSDavid Howells * Where time_offset,time_mult,time_shift and cyc are read in the 600607ca46eSDavid Howells * seqcount loop described above. This delta can then be added to 601b438b1abSAndy Lutomirski * enabled and possible running (if index), improving the scaling: 602607ca46eSDavid Howells * 603607ca46eSDavid Howells * enabled += delta; 604b438b1abSAndy Lutomirski * if (index) 605607ca46eSDavid Howells * running += delta; 606607ca46eSDavid Howells * 607607ca46eSDavid Howells * quot = count / running; 608607ca46eSDavid Howells * rem = count % running; 609607ca46eSDavid Howells * count = quot * enabled + (rem * enabled) / running; 610607ca46eSDavid Howells */ 611607ca46eSDavid Howells __u16 time_shift; 612607ca46eSDavid Howells __u32 time_mult; 613607ca46eSDavid Howells __u64 time_offset; 614c73deb6aSAdrian Hunter /* 615c73deb6aSAdrian Hunter * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated 616c73deb6aSAdrian Hunter * from sample timestamps. 
617c73deb6aSAdrian Hunter * 618c73deb6aSAdrian Hunter * time = timestamp - time_zero; 619c73deb6aSAdrian Hunter * quot = time / time_mult; 620c73deb6aSAdrian Hunter * rem = time % time_mult; 621c73deb6aSAdrian Hunter * cyc = (quot << time_shift) + (rem << time_shift) / time_mult; 622c73deb6aSAdrian Hunter * 623c73deb6aSAdrian Hunter * And vice versa: 624c73deb6aSAdrian Hunter * 625c73deb6aSAdrian Hunter * quot = cyc >> time_shift; 626b9511cd7SAdrian Hunter * rem = cyc & (((u64)1 << time_shift) - 1); 627c73deb6aSAdrian Hunter * timestamp = time_zero + quot * time_mult + 628c73deb6aSAdrian Hunter * ((rem * time_mult) >> time_shift); 629c73deb6aSAdrian Hunter */ 630c73deb6aSAdrian Hunter __u64 time_zero; 6316c0246a4SPeter Zijlstra 632fa731587SPeter Zijlstra __u32 size; /* Header size up to __reserved[] fields. */ 6336c0246a4SPeter Zijlstra __u32 __reserved_1; 6346c0246a4SPeter Zijlstra 6356c0246a4SPeter Zijlstra /* 6366c0246a4SPeter Zijlstra * If cap_usr_time_short, the hardware clock is less than 64bit wide 6376c0246a4SPeter Zijlstra * and we must compute the 'cyc' value, as used by cap_usr_time, as: 6386c0246a4SPeter Zijlstra * 6396c0246a4SPeter Zijlstra * cyc = time_cycles + ((cyc - time_cycles) & time_mask) 6406c0246a4SPeter Zijlstra * 6416c0246a4SPeter Zijlstra * NOTE: this form is explicitly chosen such that cap_usr_time_short 6426c0246a4SPeter Zijlstra * is a correction on top of cap_usr_time, and code that doesn't 6436c0246a4SPeter Zijlstra * know about cap_usr_time_short still works under the assumption 6446c0246a4SPeter Zijlstra * the counter doesn't wrap. 6456c0246a4SPeter Zijlstra */ 6466c0246a4SPeter Zijlstra __u64 time_cycles; 6476c0246a4SPeter Zijlstra __u64 time_mask; 648607ca46eSDavid Howells 649607ca46eSDavid Howells /* 650607ca46eSDavid Howells * Hole for extension of the self monitor capabilities 651607ca46eSDavid Howells */ 652607ca46eSDavid Howells 6536c0246a4SPeter Zijlstra __u8 __reserved[116*8]; /* align to 1k. 
*/ 654607ca46eSDavid Howells 655607ca46eSDavid Howells /* 656607ca46eSDavid Howells * Control data for the mmap() data buffer. 657607ca46eSDavid Howells * 658bf378d34SPeter Zijlstra * User-space reading the @data_head value should issue an smp_rmb(), 659bf378d34SPeter Zijlstra * after reading this value. 660607ca46eSDavid Howells * 661607ca46eSDavid Howells * When the mapping is PROT_WRITE the @data_tail value should be 662bf378d34SPeter Zijlstra * written by userspace to reflect the last read data, after issueing 663bf378d34SPeter Zijlstra * an smp_mb() to separate the data read from the ->data_tail store. 664bf378d34SPeter Zijlstra * In this case the kernel will not over-write unread data. 665bf378d34SPeter Zijlstra * 666bf378d34SPeter Zijlstra * See perf_output_put_handle() for the data ordering. 667e8c6deacSAlexander Shishkin * 668e8c6deacSAlexander Shishkin * data_{offset,size} indicate the location and size of the perf record 669e8c6deacSAlexander Shishkin * buffer within the mmapped area. 670607ca46eSDavid Howells */ 671607ca46eSDavid Howells __u64 data_head; /* head in the data section */ 672607ca46eSDavid Howells __u64 data_tail; /* user-space written tail */ 673e8c6deacSAlexander Shishkin __u64 data_offset; /* where the buffer starts */ 674e8c6deacSAlexander Shishkin __u64 data_size; /* data buffer size */ 67545bfb2e5SPeter Zijlstra 67645bfb2e5SPeter Zijlstra /* 67745bfb2e5SPeter Zijlstra * AUX area is defined by aux_{offset,size} fields that should be set 67845bfb2e5SPeter Zijlstra * by the userspace, so that 67945bfb2e5SPeter Zijlstra * 68045bfb2e5SPeter Zijlstra * aux_offset >= data_offset + data_size 68145bfb2e5SPeter Zijlstra * 68245bfb2e5SPeter Zijlstra * prior to mmap()ing it. Size of the mmap()ed area should be aux_size. 68345bfb2e5SPeter Zijlstra * 68445bfb2e5SPeter Zijlstra * Ring buffer pointers aux_{head,tail} have the same semantics as 68545bfb2e5SPeter Zijlstra * data_{head,tail} and same ordering rules apply. 
68645bfb2e5SPeter Zijlstra */ 68745bfb2e5SPeter Zijlstra __u64 aux_head; 68845bfb2e5SPeter Zijlstra __u64 aux_tail; 68945bfb2e5SPeter Zijlstra __u64 aux_offset; 69045bfb2e5SPeter Zijlstra __u64 aux_size; 691607ca46eSDavid Howells }; 692607ca46eSDavid Howells 69388a16a13SJiri Olsa /* 69488a16a13SJiri Olsa * The current state of perf_event_header::misc bits usage: 69588a16a13SJiri Olsa * ('|' used bit, '-' unused bit) 69688a16a13SJiri Olsa * 69788a16a13SJiri Olsa * 012 CDEF 69888a16a13SJiri Olsa * |||---------|||| 69988a16a13SJiri Olsa * 70088a16a13SJiri Olsa * Where: 70188a16a13SJiri Olsa * 0-2 CPUMODE_MASK 70288a16a13SJiri Olsa * 70388a16a13SJiri Olsa * C PROC_MAP_PARSE_TIMEOUT 70488a16a13SJiri Olsa * D MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT 70588a16a13SJiri Olsa * E MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT 70688a16a13SJiri Olsa * F (reserved) 70788a16a13SJiri Olsa */ 70888a16a13SJiri Olsa 709607ca46eSDavid Howells #define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0) 710607ca46eSDavid Howells #define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) 711607ca46eSDavid Howells #define PERF_RECORD_MISC_KERNEL (1 << 0) 712607ca46eSDavid Howells #define PERF_RECORD_MISC_USER (2 << 0) 713607ca46eSDavid Howells #define PERF_RECORD_MISC_HYPERVISOR (3 << 0) 714607ca46eSDavid Howells #define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0) 715607ca46eSDavid Howells #define PERF_RECORD_MISC_GUEST_USER (5 << 0) 716607ca46eSDavid Howells 71782b89778SAdrian Hunter /* 718930e6fcdSKan Liang * Indicates that /proc/PID/maps parsing are truncated by time out. 
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
/*
 * Following PERF_RECORD_MISC_* are used on different
 * events, so can reuse the same bit position:
 *
 *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
 *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
 *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
 *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
 */
#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
#define PERF_RECORD_MISC_FORK_EXEC		(1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
/*
 * These PERF_RECORD_MISC_* flags below are safely reused
 * for the following events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
 *
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that the thread was preempted in TASK_RUNNING state.
 *
 * PERF_RECORD_MISC_MMAP_BUILD_ID:
 *   Indicates that the mmap2 event carries build id data.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
#define PERF_RECORD_MISC_MMAP_BUILD_ID		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

/* Common header that starts every record in the perf ring buffer. */
struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

/* Namespace identity: device and inode of a /proc/<pid>/ns/* link. */
struct perf_ns_link_info {
	__u64	dev;
	__u64	ino;
};

/* Indexes into the nr_namespaces array of PERF_RECORD_NAMESPACES. */
enum {
	NET_NS_INDEX		= 0,
	UTS_NS_INDEX		= 1,
	IPC_NS_INDEX		= 2,
	PID_NS_INDEX		= 3,
	USER_NS_INDEX		= 4,
	MNT_NS_INDEX		= 5,
	CGROUP_NS_INDEX		= 6,

	NR_NAMESPACES,		/* number of available namespaces */
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER) described in PERF_RECORD_SAMPLE below; they will be
	 * stashed just after the perf_event_header and the fields already
	 * present, i.e. at the end of the payload. That way a newer perf.data
	 * file will be supported by older perf tools, with these new optional
	 * fields being ignored.
	 *
	 * struct sample_id {
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			id;       } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.  The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	#
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *	#
	 *
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt the
	 *	# stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size];}&& PERF_SAMPLE_RAW
	 *
	 *	{ u64			nr;
	 *	  { u64	hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
	 *	  { u64 from, to, flags } lbr[nr];
	 *	} && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64			size;
	 *	  char			data[size];
	 *	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ union perf_sample_weight
	 *	 {
	 *		u64		full; && PERF_SAMPLE_WEIGHT
	 *	#if defined(__LITTLE_ENDIAN_BITFIELD)
	 *		struct {
	 *			u32	var1_dw;
	 *			u16	var2_w;
	 *			u16	var3_w;
	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
	 *	#elif defined(__BIG_ENDIAN_BITFIELD)
	 *		struct {
	 *			u16	var3_w;
	 *			u16	var2_w;
	 *			u32	var1_dw;
	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
	 *	#endif
	 *	 }
	 *	}
	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
	 *	{ u64			phys_addr;} && PERF_SAMPLE_PHYS_ADDR
	 *	{ u64			size;
	 *	  char			data[size]; } && PERF_SAMPLE_AUX
	 *	{ u64			data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
	 *	{ u64			code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	/*
	 * The MMAP2 records are an augmented version of MMAP; they add
	 * maj, min, ino numbers to be used to uniquely identify each mapping.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	union {
	 *		struct {
	 *			u32		maj;
	 *			u32		min;
	 *			u64		ino;
	 *			u64		ino_generation;
	 *		};
	 *		struct {
	 *			u8		build_id_size;
	 *			u8		__reserved_1;
	 *			u16		__reserved_2;
	 *			u8		build_id[20];
	 *		};
	 *	};
	 *	u32				prot, flags;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2			= 10,

	/*
	 * Records that new data landed in the AUX buffer part.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				aux_offset;
	 *	u64				aux_size;
	 *	u64				flags;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_AUX				= 11,

	/*
	 * Indicates that instruction trace has started
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_ITRACE_START		= 12,

	/*
	 * Records the dropped/lost sample number.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST_SAMPLES		= 13,

	/*
	 * Records a context switch in or out (flagged by
	 * PERF_RECORD_MISC_SWITCH_OUT). See also
	 * PERF_RECORD_SWITCH_CPU_WIDE.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH			= 14,

	/*
	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
	 * next_prev_tid that are the next (switching out) or previous
	 * (switching in) pid/tid.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				next_prev_pid;
	 *	u32				next_prev_tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH_CPU_WIDE		= 15,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 *	u64				nr_namespaces;
	 *	{ u64				dev, inode; } [nr_namespaces];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_NAMESPACES			= 16,

	/*
	 * Record ksymbol register/unregister events:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				addr;
	 *	u32				len;
	 *	u16				ksym_type;
	 *	u16				flags;
	 *	char				name[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_KSYMBOL			= 17,

	/*
	 * Record bpf events:
	 *  enum perf_bpf_event_type {
	 *	PERF_BPF_EVENT_UNKNOWN		= 0,
	 *	PERF_BPF_EVENT_PROG_LOAD	= 1,
	 *	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
	 *  };
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u16				type;
	 *	u16				flags;
	 *	u32				id;
	 *	u8				tag[BPF_TAG_SIZE];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_BPF_EVENT			= 18,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	char				path[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_CGROUP			= 19,

	/*
	 * Records changes to kernel text i.e. self-modified code. 'old_len' is
	 * the number of old bytes, 'new_len' is the number of new bytes. Either
	 * 'old_len' or 'new_len' may be zero to indicate, for example, the
	 * addition or removal of a trampoline. 'bytes' contains the old bytes
	 * followed immediately by the new bytes.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				addr;
	 *	u16				old_len;
	 *	u16				new_len;
	 *	u8				bytes[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_TEXT_POKE			= 20,

	/*
	 * Data written to the AUX area by hardware due to aux_output, may need
	 * to be matched to the event by an architecture-specific hardware ID.
	 * This records the hardware ID, but requires sample_id to provide the
	 * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
	 * records from multiple events.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				hw_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_AUX_OUTPUT_HW_ID		= 21,

	PERF_RECORD_MAX,			/* non-ABI */
};

enum perf_record_ksymbol_type {
	PERF_RECORD_KSYMBOL_TYPE_UNKNOWN	= 0,
	PERF_RECORD_KSYMBOL_TYPE_BPF		= 1,
	/*
	 * Out of line code such as kprobe-replaced instructions or optimized
	 * kprobes or ftrace trampolines.
	 */
	PERF_RECORD_KSYMBOL_TYPE_OOL		= 2,
	PERF_RECORD_KSYMBOL_TYPE_MAX		/* non-ABI */
};

#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER	(1 << 0)

enum perf_bpf_event_type {
	PERF_BPF_EVENT_UNKNOWN		= 0,
	PERF_BPF_EVENT_PROG_LOAD	= 1,
	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
	PERF_BPF_EVENT_MAX,		/* non-ABI */
};

#define PERF_MAX_STACK_DEPTH		127
#define PERF_MAX_CONTEXTS_PER_STACK	  8

/*
 * Sentinel values interleaved in PERF_SAMPLE_CALLCHAIN ips[] to mark the
 * execution context of the entries that follow.
 */
enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED			0x01	/* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE			0x02	/* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL			0x04	/* record contains gaps */
#define PERF_AUX_FLAG_COLLISION			0x08	/* sample collided with another */
#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK	0xff00	/* PMU specific trace format type */

/* CoreSight PMU AUX buffer formats */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT	0x0000 /* Default for backward compatibility */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW		0x0100 /* Raw format of the source */

#define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
#define PERF_FLAG_FD_OUTPUT		(1UL << 1)
#define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64   mem_op:5,	/* type of opcode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_snoop:5,	/* snoop mode */
			mem_lock:2,	/* lock instr */
			mem_dtlb:7,	/* tlb access */
			mem_lvl_num:4,	/* memory hierarchy level number */
			mem_remote:1,   /* remote */
			mem_snoopx:2,	/* snoop mode, ext */
			mem_blk:3,	/* access blocked */
			mem_hops:3,	/* hop level */
			mem_rsvd:18;
	};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_rsvd:18,
			mem_hops:3,	/* hop level */
			mem_blk:3,	/* access blocked */
			mem_snoopx:2,	/* snoop mode, ext */
			mem_remote:1,   /* remote */
			mem_lvl_num:4,	/* memory hierarchy level number */
			mem_dtlb:7,	/* tlb access */
			mem_lock:2,	/* lock instr */
			mem_snoop:5,	/* snoop mode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_op:5;	/* type of opcode */
	};
};
#else
#error "Unknown endianness"
#endif

/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA		0x01 /* not available */
#define PERF_MEM_OP_LOAD	0x02 /* load instruction */
#define PERF_MEM_OP_STORE	0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
#define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT	0

/*
 * PERF_MEM_LVL_* namespace being deprecated to some extent in the
 * favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
 * Supporting this namespace in order to not break defined ABIs.
 *
 * memory hierarchy (memory level, hit or miss)
 */
#define PERF_MEM_LVL_NA		0x01  /* not available */
#define PERF_MEM_LVL_HIT	0x02  /* hit level */
#define PERF_MEM_LVL_MISS	0x04  /* miss level */
#define PERF_MEM_LVL_L1		0x08  /* L1 */
#define PERF_MEM_LVL_LFB	0x10  /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x20  /* L2 */
#define PERF_MEM_LVL_L3		0x40  /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x80  /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x100 /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x200 /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x400 /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x800 /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

#define PERF_MEM_REMOTE_REMOTE	0x01  /* Remote */
#define PERF_MEM_REMOTE_SHIFT	37

#define PERF_MEM_LVLNUM_L1	0x01 /* L1 */
#define PERF_MEM_LVLNUM_L2	0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3	0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4	0x04 /* L4 */
/* 5-0xa available */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB	0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM	0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM	0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA	0x0f /* N/A */

#define PERF_MEM_LVLNUM_SHIFT	33

/* snoop mode */
#define PERF_MEM_SNOOP_NA	0x01 /* not available */
#define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

#define PERF_MEM_SNOOPX_FWD	0x01 /* forward */
/* 1 free */
#define PERF_MEM_SNOOPX_SHIFT  38

/* locked instruction */
#define PERF_MEM_LOCK_NA	0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED	0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x01 /* not available */
#define PERF_MEM_TLB_HIT	0x02 /* hit level */
#define PERF_MEM_TLB_MISS	0x04 /* miss level */
#define PERF_MEM_TLB_L1		0x08 /* L1 */
#define PERF_MEM_TLB_L2		0x10 /* L2 */
#define PERF_MEM_TLB_WK		0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26
/* Access blocked (values for the mem_blk field) */
#define PERF_MEM_BLK_NA		0x01 /* not available */
#define PERF_MEM_BLK_DATA	0x02 /* data could not be forwarded */
#define PERF_MEM_BLK_ADDR	0x04 /* address conflict */
#define PERF_MEM_BLK_SHIFT	40   /* bit offset of mem_blk in 'val' */

/* hop level (values for the mem_hops field) */
#define PERF_MEM_HOPS_0		0x01 /* remote core, same node */
#define PERF_MEM_HOPS_1		0x02 /* remote node, same socket */
#define PERF_MEM_HOPS_2		0x03 /* remote socket, same board */
#define PERF_MEM_HOPS_3		0x04 /* remote board */
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT	43   /* bit offset of mem_hops in 'val' */

/*
 * Compose one perf_mem_data_src field: e.g. PERF_MEM_S(OP, LOAD)
 * expands to ((__u64)PERF_MEM_OP_LOAD << PERF_MEM_OP_SHIFT).
 */
#define PERF_MEM_S(a, s) \
	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		in_tx:1,    /* in transaction */
		abort:1,    /* transaction abort */
		cycles:16,  /* cycle count to last branch */
		type:4,     /* branch type */
		reserved:40;
};

/*
 * Sample weight (PERF_SAMPLE_WEIGHT_STRUCT): either one 64-bit value
 * ('full') or three packed sub-fields. The per-endianness layouts keep
 * each var*_w field at the same bit positions within 'full'.
 */
union perf_sample_weight {
	__u64		full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	struct {
		__u32	var1_dw;
		__u16	var2_w;
		__u16	var3_w;
	};
#elif defined(__BIG_ENDIAN_BITFIELD)
	struct {
		__u16	var3_w;
		__u16	var2_w;
		__u32	var1_dw;
	};
#else
#error "Unknown endianness"
#endif
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */