/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBPERF_EVENT_H
#define __LIBPERF_EVENT_H

/*
 * Record layouts for perf events.
 *
 * The perf_record_* structs below give C layouts for the records found in
 * the perf ring buffer and in perf.data files.  Some mirror kernel-generated
 * PERF_RECORD_* types from <linux/perf_event.h>; the tool-synthesized ones
 * are enumerated in enum perf_user_event_type at the bottom of this file.
 * Member order, widths and padding are part of a binary format -- do not
 * reorder or resize fields.
 */

#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/bpf.h>
#include <sys/types.h> /* pid_t */

/*
 * True when the record 'obj' is large enough (per its self-describing
 * header.size) to actually contain the trailing member 'mem'.  Used to
 * detect optional fields appended by newer producers.
 */
#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))

/* PERF_RECORD_MMAP: a memory mapping (address range backed by 'filename'). */
struct perf_record_mmap {
	struct perf_event_header header;
	__u32			 pid, tid;
	__u64			 start;
	__u64			 len;
	__u64			 pgoff;
	char			 filename[PATH_MAX];
};

/*
 * PERF_RECORD_MMAP2: like perf_record_mmap but with extra identification of
 * the backing object -- either device/inode numbers or a build ID (the two
 * variants share storage in the anonymous union below; presumably the
 * build-id form is selected by a misc flag in the header -- confirm against
 * the kernel UAPI).
 */
struct perf_record_mmap2 {
	struct perf_event_header header;
	__u32			 pid, tid;
	__u64			 start;
	__u64			 len;
	__u64			 pgoff;
	union {
		/* Device/inode identification of the mapped file. */
		struct {
			__u32	 maj;
			__u32	 min;
			__u64	 ino;
			__u64	 ino_generation;
		};
		/* Build-id identification (same 24 bytes reinterpreted). */
		struct {
			__u8	 build_id_size;
			__u8	 __reserved_1;
			__u16	 __reserved_2;
			__u8	 build_id[20];
		};
	};
	__u32			 prot;
	__u32			 flags;
	char			 filename[PATH_MAX];
};

/* PERF_RECORD_COMM: task command-name change. */
struct perf_record_comm {
	struct perf_event_header header;
	__u32			 pid, tid;
	char			 comm[16];
};

/* PERF_RECORD_NAMESPACES: namespaces of a task; nr_namespaces entries follow. */
struct perf_record_namespaces {
	struct perf_event_header header;
	__u32			 pid, tid;
	__u64			 nr_namespaces;
	struct perf_ns_link_info link_info[];
};

/* PERF_RECORD_FORK: task creation (also used for PERF_RECORD_EXIT layout). */
struct perf_record_fork {
	struct perf_event_header header;
	__u32			 pid, ppid;
	__u32			 tid, ptid;
	__u64			 time;
};

/* PERF_RECORD_LOST: 'lost' events were dropped for the stream with this id. */
struct perf_record_lost {
	struct perf_event_header header;
	__u64			 id;
	__u64			 lost;
};

/* header.misc flag: the lost samples were dropped by a BPF filter. */
#define PERF_RECORD_MISC_LOST_SAMPLES_BPF (1 << 15)

/* PERF_RECORD_LOST_SAMPLES: count of samples dropped. */
struct perf_record_lost_samples {
	struct perf_event_header header;
	__u64			 lost;
};

/* Maximum number of sample-id header entries appended below. */
#define MAX_ID_HDR_ENTRIES 6
/* perf_record_lost_samples plus worst-case storage for its sample_id fields. */
struct perf_record_lost_samples_and_ids {
	struct perf_record_lost_samples lost;
	__u64			 sample_ids[MAX_ID_HDR_ENTRIES];
};

/*
 * PERF_RECORD_READ with read_format:
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_LOST
 */
struct perf_record_read {
	struct perf_event_header header;
	__u32			 pid, tid;
	__u64			 value;
	__u64			 time_enabled;
	__u64			 time_running;
	__u64			 id;
	__u64			 lost;
};

/* PERF_RECORD_THROTTLE / PERF_RECORD_UNTHROTTLE share this layout. */
struct perf_record_throttle {
	struct perf_event_header header;
	__u64			 time;
	__u64			 id;
	__u64			 stream_id;
};

#ifndef KSYM_NAME_LEN
#define KSYM_NAME_LEN 512
#endif

/* PERF_RECORD_KSYMBOL: kernel symbol register/unregister (e.g. BPF JIT). */
struct perf_record_ksymbol {
	struct perf_event_header header;
	__u64			 addr;
	__u32			 len;
	__u16			 ksym_type;
	__u16			 flags;
	char			 name[KSYM_NAME_LEN];
};

/* PERF_RECORD_BPF_EVENT: BPF program load/unload notification. */
struct perf_record_bpf_event {
	struct perf_event_header header;
	__u16			 type;
	__u16			 flags;
	__u32			 id;

	/* for bpf_prog types */
	__u8			 tag[BPF_TAG_SIZE];  /* prog tag */
};

/* PERF_RECORD_CGROUP: cgroup id to path association. */
struct perf_record_cgroup {
	struct perf_event_header header;
	__u64			 id;
	char			 path[PATH_MAX];
};

/*
 * PERF_RECORD_TEXT_POKE: kernel text modified at 'addr'; old then new bytes
 * follow in bytes[] (old_len + new_len bytes total).
 */
struct perf_record_text_poke_event {
	struct perf_event_header header;
	__u64			 addr;
	__u16			 old_len;
	__u16			 new_len;
	__u8			 bytes[];
};

/*
 * PERF_RECORD_SAMPLE: contents of array[] depend entirely on the event's
 * sample_type; parse with the attr in hand.
 */
struct perf_record_sample {
	struct perf_event_header header;
	__u64			 array[];
};

/* PERF_RECORD_SWITCH / PERF_RECORD_SWITCH_CPU_WIDE context switch. */
struct perf_record_switch {
	struct perf_event_header header;
	__u32			 next_prev_pid;
	__u32			 next_prev_tid;
};

/* Deferred user-space callchain delivered separately from its sample. */
struct perf_record_callchain_deferred {
	struct perf_event_header header;
	/*
	 * This is to match kernel and (deferred) user stacks together.
	 * The kernel part will be in the sample callchain array after
	 * the PERF_CONTEXT_USER_DEFERRED entry.
	 */
	__u64			 cookie;
	__u64			 nr;
	__u64			 ips[];
};

/* PERF_RECORD_HEADER_ATTR (tool-synthesized): event attr plus trailing ids. */
struct perf_record_header_attr {
	struct perf_event_header header;
	struct perf_event_attr	 attr;
	/*
	 * Array of u64 id follows here but we cannot use a flexible array
	 * because size of attr in the data can be different than current
	 * version.  Please use perf_record_header_attr_id() below.
	 *
	 * __u64		 id[]; // do not use this
	 */
};

/* Returns the pointer to id array based on the actual attr size. */
#define perf_record_header_attr_id(evt)			\
	((void *)&(evt)->attr.attr + (evt)->attr.attr.size)

/* Encodings selectable via perf_record_cpu_map_data::type. */
enum {
	PERF_CPU_MAP__CPUS = 0,
	PERF_CPU_MAP__MASK = 1,
	PERF_CPU_MAP__RANGE_CPUS = 2,
};

/*
 * Array encoding of a perf_cpu_map where nr is the number of entries in cpu[]
 * and each entry is a value for a CPU in the map.
 */
struct cpu_map_entries {
	__u16			 nr;
	__u16			 cpu[];
};

/* Bitmap encoding of a perf_cpu_map where bitmap entries are 32-bit. */
struct perf_record_mask_cpu_map32 {
	/* Number of mask values. */
	__u16			 nr;
	/* Constant 4. */
	__u16			 long_size;
	/* Bitmap data. */
	__u32			 mask[];
};

/* Bitmap encoding of a perf_cpu_map where bitmap entries are 64-bit. */
struct perf_record_mask_cpu_map64 {
	/* Number of mask values. */
	__u16			 nr;
	/* Constant 8. */
	__u16			 long_size;
	/* Legacy padding. */
	char			 __pad[4];
	/* Bitmap data. */
	__u64			 mask[];
};

/*
 * 'struct perf_record_cpu_map_data' is packed as unfortunately an earlier
 * version had unaligned data and we wish to retain file format compatibility.
 * -irogers
 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
#pragma GCC diagnostic ignored "-Wattributes"

/*
 * An encoding of a CPU map for a range starting at start_cpu through to
 * end_cpu. If any_cpu is 1, an any CPU (-1) value (aka dummy value) is present.
 */
struct perf_record_range_cpu_map {
	__u8			 any_cpu;
	__u8			 __pad;
	__u16			 start_cpu;
	__u16			 end_cpu;
};

/* Discriminated union of the three CPU-map encodings above; see 'type'. */
struct perf_record_cpu_map_data {
	__u16			 type;
	union {
		/* Used when type == PERF_CPU_MAP__CPUS. */
		struct cpu_map_entries cpus_data;
		/* Used when type == PERF_CPU_MAP__MASK and long_size == 4. */
		struct perf_record_mask_cpu_map32 mask32_data;
		/* Used when type == PERF_CPU_MAP__MASK and long_size == 8. */
		struct perf_record_mask_cpu_map64 mask64_data;
		/* Used when type == PERF_CPU_MAP__RANGE_CPUS. */
		struct perf_record_range_cpu_map range_cpu_data;
	};
} __attribute__((packed));

#pragma GCC diagnostic pop

/* PERF_RECORD_CPU_MAP (tool-synthesized). */
struct perf_record_cpu_map {
	struct perf_event_header	 header;
	struct perf_record_cpu_map_data	 data;
};

/* Payload selector for perf_record_event_update::type. */
enum {
	PERF_EVENT_UPDATE__UNIT  = 0,
	PERF_EVENT_UPDATE__SCALE = 1,
	PERF_EVENT_UPDATE__NAME  = 2,
	PERF_EVENT_UPDATE__CPUS  = 3,
};

struct perf_record_event_update_cpus {
	struct perf_record_cpu_map_data cpus;
};

struct perf_record_event_update_scale {
	double			 scale;
};

/* PERF_RECORD_EVENT_UPDATE (tool-synthesized): per-event metadata update. */
struct perf_record_event_update {
	struct perf_event_header header;
	__u64			 type;
	__u64			 id;
	union {
		/* Used when type == PERF_EVENT_UPDATE__SCALE. */
		struct perf_record_event_update_scale scale;
		/* Used when type == PERF_EVENT_UPDATE__UNIT. */
		char unit[0];
		/* Used when type == PERF_EVENT_UPDATE__NAME. */
		char name[0];
		/* Used when type == PERF_EVENT_UPDATE__CPUS. */
		struct perf_record_event_update_cpus cpus;
	};
};

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
	__u64			 event_id;
	char			 name[MAX_EVENT_NAME];
};

/* PERF_RECORD_HEADER_EVENT_TYPE (deprecated, see enum below). */
struct perf_record_header_event_type {
	struct perf_event_header	 header;
	struct perf_trace_event_type	 event_type;
};

/* PERF_RECORD_HEADER_TRACING_DATA: 'size' bytes of tracing data follow. */
struct perf_record_header_tracing_data {
	struct perf_event_header header;
	__u32			 size;
	__u32			 pad;
};

/* header.misc flag: the union below carries the sized build-id variant. */
#define PERF_RECORD_MISC_BUILD_ID_SIZE (1 << 15)

/* PERF_RECORD_HEADER_BUILD_ID: build id of the file named by filename[]. */
struct perf_record_header_build_id {
	struct perf_event_header header;
	pid_t			 pid;
	union {
		/* Legacy fixed-size build id. */
		__u8		 build_id[24];
		/* Sized variant (same 24 bytes reinterpreted). */
		struct {
			__u8	 data[20];
			__u8	 size;
			__u8	 reserved1__;
			__u16	 reserved2__;
		};
	};
	char			 filename[];
};

struct id_index_entry {
	__u64			 id;
	__u64			 idx;
	__u64			 cpu;
	__u64			 tid;
};

struct id_index_entry_2 {
	__u64			 machine_pid;
	__u64			 vcpu;
};

/* PERF_RECORD_ID_INDEX: 'nr' id_index_entry records follow. */
struct perf_record_id_index {
	struct perf_event_header header;
	__u64			 nr;
	struct id_index_entry	 entries[];
};

/* PERF_RECORD_AUXTRACE_INFO: type-specific private data in priv[]. */
struct perf_record_auxtrace_info {
	struct perf_event_header header;
	__u32			 type;
	__u32			 reserved__; /* For alignment */
	__u64			 priv[];
};

/* PERF_RECORD_AUXTRACE: describes 'size' bytes of AUX area trace data. */
struct perf_record_auxtrace {
	struct perf_event_header header;
	__u64			 size;
	__u64			 offset;
	__u64			 reference;
	__u32			 idx;
	__u32			 tid;
	__u32			 cpu;
	__u32			 reserved__; /* For alignment */
};

#define MAX_AUXTRACE_ERROR_MSG 64

/*
 * PERF_RECORD_AUXTRACE_ERROR.  Note machine_pid/vcpu trail msg[]; use
 * event_contains() to check whether a given record carries them.
 */
struct perf_record_auxtrace_error {
	struct perf_event_header header;
	__u32			 type;
	__u32			 code;
	__u32			 cpu;
	__u32			 pid;
	__u32			 tid;
	__u32			 fmt;
	__u64			 ip;
	__u64			 time;
	char			 msg[MAX_AUXTRACE_ERROR_MSG];
	__u32			 machine_pid;
	__u32			 vcpu;
};

/* PERF_RECORD_AUX: new data landed in the AUX buffer at aux_offset. */
struct perf_record_aux {
	struct perf_event_header header;
	__u64			 aux_offset;
	__u64			 aux_size;
	__u64			 flags;
};

/* PERF_RECORD_ITRACE_START: instruction tracing started for pid/tid. */
struct perf_record_itrace_start {
	struct perf_event_header header;
	__u32			 pid;
	__u32			 tid;
};

/* PERF_RECORD_AUX_OUTPUT_HW_ID: hardware id for matching AUX output. */
struct perf_record_aux_output_hw_id {
	struct perf_event_header header;
	__u64			 hw_id;
};

struct perf_record_thread_map_entry {
	__u64			 pid;
	char			 comm[16];
};

/* PERF_RECORD_THREAD_MAP (tool-synthesized): 'nr' entries follow. */
struct perf_record_thread_map {
	struct perf_event_header		 header;
	__u64					 nr;
	struct perf_record_thread_map_entry	 entries[];
};

/* Tags for perf_record_stat_config_entry::tag. */
enum {
	PERF_STAT_CONFIG_TERM__AGGR_MODE	= 0,
	PERF_STAT_CONFIG_TERM__INTERVAL		= 1,
	PERF_STAT_CONFIG_TERM__SCALE		= 2,
	PERF_STAT_CONFIG_TERM__AGGR_LEVEL	= 3,
	PERF_STAT_CONFIG_TERM__MAX		= 4,
};

/* One tag/value pair of stat configuration. */
struct perf_record_stat_config_entry {
	__u64			 tag;
	__u64			 val;
};

/* PERF_RECORD_STAT_CONFIG (tool-synthesized): 'nr' tag/value pairs follow. */
struct perf_record_stat_config {
	struct perf_event_header		 header;
	__u64					 nr;
	struct perf_record_stat_config_entry	 data[];
};

/* PERF_RECORD_STAT (tool-synthesized): one counter reading. */
struct perf_record_stat {
	struct perf_event_header header;

	__u64			 id;
	__u32			 cpu;
	__u32			 thread;

	union {
		struct {
			__u64	 val;
			__u64	 ena;
			__u64	 run;
		};
		/* Same three counters, addressable as an array. */
		__u64		 values[3];
	};
};

/* PERF_RECORD_STAT_ROUND (tool-synthesized): marks an interval/final round. */
struct perf_record_stat_round {
	struct perf_event_header header;
	__u64			 type;
	__u64			 time;
};

/* PERF_RECORD_TIME_CONV (tool-synthesized): TSC <-> perf time conversion. */
struct perf_record_time_conv {
	struct perf_event_header header;
	__u64			 time_shift;
	__u64			 time_mult;
	__u64			 time_zero;
	__u64			 time_cycles;
	__u64			 time_mask;
	__u8			 cap_user_time_zero;
	__u8			 cap_user_time_short;
	__u8			 reserved[6];	/* For alignment */
};

/* PERF_RECORD_HEADER_FEATURE (tool-synthesized): feature section payload. */
struct perf_record_header_feature {
	struct perf_event_header header;
	__u64			 feat_id;
	char			 data[];
};

/* PERF_RECORD_COMPRESSED (tool-synthesized): compressed record stream. */
struct perf_record_compressed {
	struct perf_event_header header;
	char			 data[];
};

/*
 * `header.size` includes the padding we are going to add while writing the record.
 * `data_size` only includes the size of `data[]` itself.
 */
struct perf_record_compressed2 {
	struct perf_event_header header;
	__u64			 data_size;
	char			 data[];
};

#define BPF_METADATA_KEY_LEN   64
#define BPF_METADATA_VALUE_LEN 256
#define BPF_PROG_NAME_LEN      KSYM_NAME_LEN

/* One key/value pair of BPF program metadata. */
struct perf_record_bpf_metadata_entry {
	char			 key[BPF_METADATA_KEY_LEN];
	char			 value[BPF_METADATA_VALUE_LEN];
};

/* PERF_RECORD_BPF_METADATA (tool-synthesized): 'nr_entries' pairs follow. */
struct perf_record_bpf_metadata {
	struct perf_event_header		 header;
	char					 prog_name[BPF_PROG_NAME_LEN];
	__u64					 nr_entries;
	struct perf_record_bpf_metadata_entry	 entries[];
};

/*
 * Record types synthesized by the perf tool itself (never produced by the
 * kernel), numbered above any possible kernel PERF_RECORD_* type.
 */
enum perf_user_event_type { /* above any possible kernel type */
	PERF_RECORD_USER_TYPE_START		= 64,
	PERF_RECORD_HEADER_ATTR			= 64,
	PERF_RECORD_HEADER_EVENT_TYPE		= 65, /* deprecated */
	PERF_RECORD_HEADER_TRACING_DATA		= 66,
	PERF_RECORD_HEADER_BUILD_ID		= 67,
	PERF_RECORD_FINISHED_ROUND		= 68,
	PERF_RECORD_ID_INDEX			= 69,
	PERF_RECORD_AUXTRACE_INFO		= 70,
	PERF_RECORD_AUXTRACE			= 71,
	PERF_RECORD_AUXTRACE_ERROR		= 72,
	PERF_RECORD_THREAD_MAP			= 73,
	PERF_RECORD_CPU_MAP			= 74,
	PERF_RECORD_STAT_CONFIG			= 75,
	PERF_RECORD_STAT			= 76,
	PERF_RECORD_STAT_ROUND			= 77,
	PERF_RECORD_EVENT_UPDATE		= 78,
	PERF_RECORD_TIME_CONV			= 79,
	PERF_RECORD_HEADER_FEATURE		= 80,
	PERF_RECORD_COMPRESSED			= 81,
	PERF_RECORD_FINISHED_INIT		= 82,
	PERF_RECORD_COMPRESSED2			= 83,
	PERF_RECORD_BPF_METADATA		= 84,
	PERF_RECORD_HEADER_MAX
};

/*
 * View of a raw record as any of the layouts above.  Always read
 * header.type first, then access the matching member.
 */
union perf_event {
	struct perf_event_header		header;
	struct perf_record_mmap			mmap;
	struct perf_record_mmap2		mmap2;
	struct perf_record_comm			comm;
	struct perf_record_namespaces		namespaces;
	struct perf_record_cgroup		cgroup;
	struct perf_record_fork		fork;
	struct perf_record_lost		lost;
	struct perf_record_lost_samples		lost_samples;
	struct perf_record_read		read;
	struct perf_record_throttle		throttle;
	struct perf_record_sample		sample;
	struct perf_record_callchain_deferred	callchain_deferred;
	struct perf_record_bpf_event		bpf;
	struct perf_record_ksymbol		ksymbol;
	struct perf_record_text_poke_event	text_poke;
	struct perf_record_header_attr		attr;
	struct perf_record_event_update		event_update;
	struct perf_record_header_event_type	event_type;
	struct perf_record_header_tracing_data	tracing_data;
	struct perf_record_header_build_id	build_id;
	struct perf_record_id_index		id_index;
	struct perf_record_auxtrace_info	auxtrace_info;
	struct perf_record_auxtrace		auxtrace;
	struct perf_record_auxtrace_error	auxtrace_error;
	struct perf_record_aux			aux;
	struct perf_record_itrace_start		itrace_start;
	struct perf_record_aux_output_hw_id	aux_output_hw_id;
	struct perf_record_switch		context_switch;
	struct perf_record_thread_map		thread_map;
	struct perf_record_cpu_map		cpu_map;
	struct perf_record_stat_config		stat_config;
	struct perf_record_stat		stat;
	struct perf_record_stat_round		stat_round;
	struct perf_record_time_conv		time_conv;
	struct perf_record_header_feature	feat;
	struct perf_record_compressed		pack;
	struct perf_record_compressed2		pack2;
	struct perf_record_bpf_metadata		bpf_metadata;
};

#endif /* __LIBPERF_EVENT_H */