/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
	PERF_SAMPLE_REGS_USER			= 1U << 12,
	PERF_SAMPLE_STACK_USER			= 1U << 13,
	PERF_SAMPLE_WEIGHT			= 1U << 14,
	PERF_SAMPLE_DATA_SRC			= 1U << 15,

	PERF_SAMPLE_MAX				= 1U << 16,	/* non-ABI */
};

/*
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */

	PERF_SAMPLE_BRANCH_MAX		= 1U << 7, /* non-ABI */
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
					/* add: sample_stack_user */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */
				sample_id_all  :  1, /* sample_type all events */

				exclude_host   :  1, /* don't count in host   */
				exclude_guest  :  1, /* don't count in guest  */

				exclude_callchain_kernel : 1, /* exclude kernel callchains */
				exclude_callchain_user   : 1, /* exclude user callchains */

				__reserved_1   : 41;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		config1; /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32	sample_stack_user;

	/* Align to u64. */
	__u32	__reserved_2;
};

#define perf_flags(attr)	(*(&(attr)->read_format + 1))

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, idx, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier()
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_usr_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     idx = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_usr_rdpmc && idx) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(idx - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reason this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */
	union {
		__u64	capabilities;
		__u64	cap_usr_time  : 1,
			cap_usr_rdpmc : 1,
			cap_____res   : 62;
	};

	/*
	 * If cap_usr_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;

	/*
	 * If cap_usr_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot = (cyc >> time_shift);
	 *   rem = cyc & ((1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *              ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset,time_mult,time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possible running (if idx), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (idx)
	 *     running += delta;
	 *
	 *   quot = count / running;
	 *   rem  = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u64	__reserved[120];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64   data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
	 * described in PERF_RECORD_SAMPLE below, it will be stashed just after
	 * the perf_event_header and the fields already present for the existing
	 * fields, i.e. at the end of the payload. That way a newer perf.data
	 * file will be supported by older perf tools, with these new optional
	 * fields being ignored.
	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char                  data[size];}&& PERF_SAMPLE_RAW
	 *
	 *	{ u64                   nr;
	 *        { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
	 *
	 * 	{ u64			abi; # enum perf_sample_regs_abi
	 * 	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 * 	{ u64			size;
	 * 	  char			data[size];
	 * 	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ u64			weight;   } && PERF_SAMPLE_WEIGHT
	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};

#define PERF_MAX_STACK_DEPTH		127

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
#define PERF_FLAG_FD_OUTPUT		(1U << 1)
#define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */

union perf_mem_data_src {
	__u64 val;
	struct {
		__u64   mem_op:5,	/* type of opcode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_snoop:5,	/* snoop mode */
			mem_lock:2,	/* lock instr */
			mem_dtlb:7,	/* tlb access */
			mem_rsvd:31;
	};
};

/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA		0x01 /* not available */
#define PERF_MEM_OP_LOAD	0x02 /* load instruction */
#define PERF_MEM_OP_STORE	0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
#define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT	0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA		0x01  /* not available */
#define PERF_MEM_LVL_HIT	0x02  /* hit level */
#define PERF_MEM_LVL_MISS	0x04  /* miss level  */
#define PERF_MEM_LVL_L1		0x08  /* L1 */
#define PERF_MEM_LVL_LFB	0x10  /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x20  /* L2 hit */
#define PERF_MEM_LVL_L3		0x40  /* L3 hit */
#define PERF_MEM_LVL_LOC_RAM	0x80  /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x100 /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x200 /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x400 /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x800 /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

/* snoop mode */
#define PERF_MEM_SNOOP_NA	0x01 /* not available */
#define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

/* locked instruction */
#define PERF_MEM_LOCK_NA	0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED	0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x01 /* not available */
#define PERF_MEM_TLB_HIT	0x02 /* hit level */
#define PERF_MEM_TLB_MISS	0x04 /* miss level */
#define PERF_MEM_TLB_L1		0x08 /* L1 */
#define PERF_MEM_TLB_L2		0x10 /* L2 */
#define PERF_MEM_TLB_WK		0x20 /* Hardware Walker*/
#define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26

#define PERF_MEM_S(a, s) \
	(((u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)

#endif /* _UAPI_LINUX_PERF_EVENT_H */