/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBPERF_EVENT_H
#define __LIBPERF_EVENT_H

#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/bpf.h>
#include <sys/types.h> /* pid_t */

#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))

struct perf_record_mmap {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 start;
	__u64 len;
	__u64 pgoff;
	char filename[PATH_MAX];
};

struct perf_record_mmap2 {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 start;
	__u64 len;
	__u64 pgoff;
	union {
		struct {
			__u32 maj;
			__u32 min;
			__u64 ino;
			__u64 ino_generation;
		};
		struct {
			__u8 build_id_size;
			__u8 __reserved_1;
			__u16 __reserved_2;
			__u8 build_id[20];
		};
	};
	__u32 prot;
	__u32 flags;
	char filename[PATH_MAX];
};

struct perf_record_comm {
	struct perf_event_header header;
	__u32 pid, tid;
	char comm[16];
};

struct perf_record_namespaces {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 nr_namespaces;
	struct perf_ns_link_info link_info[];
};

struct perf_record_fork {
	struct perf_event_header header;
	__u32 pid, ppid;
	__u32 tid, ptid;
	__u64 time;
};

struct perf_record_lost {
	struct perf_event_header header;
	__u64 id;
	__u64 lost;
};

#define PERF_RECORD_MISC_LOST_SAMPLES_BPF (1 << 15)

struct perf_record_lost_samples {
	struct perf_event_header header;
	__u64 lost;
};

#define MAX_ID_HDR_ENTRIES 6
struct perf_record_lost_samples_and_ids {
	struct perf_record_lost_samples lost;
	__u64 sample_ids[MAX_ID_HDR_ENTRIES];
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_LOST
 */
struct perf_record_read {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 value;
	__u64 time_enabled;
	__u64 time_running;
	__u64 id;
	__u64 lost;
};

struct perf_record_throttle {
	struct perf_event_header header;
	__u64 time;
	__u64 id;
	__u64 stream_id;
};

#ifndef KSYM_NAME_LEN
#define KSYM_NAME_LEN 512
#endif

struct perf_record_ksymbol {
	struct perf_event_header header;
	__u64 addr;
	__u32 len;
	__u16 ksym_type;
	__u16 flags;
	char name[KSYM_NAME_LEN];
};

struct perf_record_bpf_event {
	struct perf_event_header header;
	__u16 type;
	__u16 flags;
	__u32 id;

	/* for bpf_prog types */
	__u8 tag[BPF_TAG_SIZE]; // prog tag
};

struct perf_record_cgroup {
	struct perf_event_header header;
	__u64 id;
	char path[PATH_MAX];
};

struct perf_record_text_poke_event {
	struct perf_event_header header;
	__u64 addr;
	__u16 old_len;
	__u16 new_len;
	__u8 bytes[];
};

struct perf_record_sample {
	struct perf_event_header header;
	__u64 array[];
};

struct perf_record_switch {
	struct perf_event_header header;
	__u32 next_prev_pid;
	__u32 next_prev_tid;
};

struct perf_record_header_attr {
	struct perf_event_header header;
	struct perf_event_attr attr;
	/*
	 * An array of u64 ids follows here, but we cannot use a flexible array
	 * because the size of attr in the data can differ from the current
	 * version. Please use perf_record_header_attr_id() below.
	 *
	 * __u64 id[]; // do not use this
	 */
};

/* Returns the pointer to the id array based on the actual attr size. */
#define perf_record_header_attr_id(evt)			\
	((void *)&(evt)->attr.attr + (evt)->attr.attr.size)
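/*
 * Usage sketch (illustrative only): the number of trailing ids is not stored
 * in the record, so a reader would typically derive it from header.size. The
 * names n_ids and consume_id() below are hypothetical.
 *
 *	union perf_event *event;	// header.type == PERF_RECORD_HEADER_ATTR
 *	__u64 *ids = perf_record_header_attr_id(event);
 *	size_t n_ids = (event->header.size -
 *			((void *)ids - (void *)event)) / sizeof(__u64);
 *
 *	for (size_t i = 0; i < n_ids; i++)
 *		consume_id(ids[i]);
 */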
enum {
	PERF_CPU_MAP__CPUS = 0,
	PERF_CPU_MAP__MASK = 1,
	PERF_CPU_MAP__RANGE_CPUS = 2,
};

/*
 * Array encoding of a perf_cpu_map where nr is the number of entries in cpu[]
 * and each entry is a value for a CPU in the map.
 */
struct cpu_map_entries {
	__u16 nr;
	__u16 cpu[];
};

/* Bitmap encoding of a perf_cpu_map where bitmap entries are 32-bit. */
struct perf_record_mask_cpu_map32 {
	/* Number of mask values. */
	__u16 nr;
	/* Constant 4. */
	__u16 long_size;
	/* Bitmap data. */
	__u32 mask[];
};

/* Bitmap encoding of a perf_cpu_map where bitmap entries are 64-bit. */
struct perf_record_mask_cpu_map64 {
	/* Number of mask values. */
	__u16 nr;
	/* Constant 8. */
	__u16 long_size;
	/* Legacy padding. */
	char __pad[4];
	/* Bitmap data. */
	__u64 mask[];
};

/*
 * 'struct perf_record_cpu_map_data' is packed as unfortunately an earlier
 * version had unaligned data and we wish to retain file format compatibility.
 * -irogers
 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
#pragma GCC diagnostic ignored "-Wattributes"

/*
 * An encoding of a CPU map for a range of CPUs from start_cpu through
 * end_cpu. If any_cpu is 1, the "any CPU" value (-1, aka the dummy value) is
 * present.
 */
struct perf_record_range_cpu_map {
	__u8 any_cpu;
	__u8 __pad;
	__u16 start_cpu;
	__u16 end_cpu;
};

struct perf_record_cpu_map_data {
	__u16 type;
	union {
		/* Used when type == PERF_CPU_MAP__CPUS. */
		struct cpu_map_entries cpus_data;
		/* Used when type == PERF_CPU_MAP__MASK and long_size == 4. */
		struct perf_record_mask_cpu_map32 mask32_data;
		/* Used when type == PERF_CPU_MAP__MASK and long_size == 8. */
		struct perf_record_mask_cpu_map64 mask64_data;
		/* Used when type == PERF_CPU_MAP__RANGE_CPUS. */
		struct perf_record_range_cpu_map range_cpu_data;
	};
} __attribute__((packed));

#pragma GCC diagnostic pop

struct perf_record_cpu_map {
	struct perf_event_header header;
	struct perf_record_cpu_map_data data;
};
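/*
 * Decoding sketch (decode_cpu_map() is a hypothetical helper, shown only to
 * illustrate the layout): the union member holding the payload is selected by
 * type, and for PERF_CPU_MAP__MASK additionally by long_size.
 *
 *	static void decode_cpu_map(const struct perf_record_cpu_map_data *data)
 *	{
 *		switch (data->type) {
 *		case PERF_CPU_MAP__CPUS:
 *			// data->cpus_data.cpu[0 .. nr - 1]
 *			break;
 *		case PERF_CPU_MAP__MASK:
 *			// long_size (4 or 8) picks mask32_data vs. mask64_data
 *			break;
 *		case PERF_CPU_MAP__RANGE_CPUS:
 *			// data->range_cpu_data.start_cpu .. end_cpu
 *			break;
 *		}
 *	}
 */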
enum {
	PERF_EVENT_UPDATE__UNIT = 0,
	PERF_EVENT_UPDATE__SCALE = 1,
	PERF_EVENT_UPDATE__NAME = 2,
	PERF_EVENT_UPDATE__CPUS = 3,
};

struct perf_record_event_update_cpus {
	struct perf_record_cpu_map_data cpus;
};

struct perf_record_event_update_scale {
	double scale;
};

struct perf_record_event_update {
	struct perf_event_header header;
	__u64 type;
	__u64 id;
	union {
		/* Used when type == PERF_EVENT_UPDATE__SCALE. */
		struct perf_record_event_update_scale scale;
		/* Used when type == PERF_EVENT_UPDATE__UNIT. */
		char unit[0];
		/* Used when type == PERF_EVENT_UPDATE__NAME. */
		char name[0];
		/* Used when type == PERF_EVENT_UPDATE__CPUS. */
		struct perf_record_event_update_cpus cpus;
	};
};

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
	__u64 event_id;
	char name[MAX_EVENT_NAME];
};

struct perf_record_header_event_type {
	struct perf_event_header header;
	struct perf_trace_event_type event_type;
};

struct perf_record_header_tracing_data {
	struct perf_event_header header;
	__u32 size;
	__u32 pad;
};

#define PERF_RECORD_MISC_BUILD_ID_SIZE (1 << 15)

struct perf_record_header_build_id {
	struct perf_event_header header;
	pid_t pid;
	union {
		__u8 build_id[24];
		struct {
			__u8 data[20];
			__u8 size;
			__u8 reserved1__;
			__u16 reserved2__;
		};
	};
	char filename[];
};

struct id_index_entry {
	__u64 id;
	__u64 idx;
	__u64 cpu;
	__u64 tid;
};

struct id_index_entry_2 {
	__u64 machine_pid;
	__u64 vcpu;
};

struct perf_record_id_index {
	struct perf_event_header header;
	__u64 nr;
	struct id_index_entry entries[];
};

struct perf_record_auxtrace_info {
	struct perf_event_header header;
	__u32 type;
	__u32 reserved__; /* For alignment */
	__u64 priv[];
};

struct perf_record_auxtrace {
	struct perf_event_header header;
	__u64 size;
	__u64 offset;
	__u64 reference;
	__u32 idx;
	__u32 tid;
	__u32 cpu;
	__u32 reserved__; /* For alignment */
};

#define MAX_AUXTRACE_ERROR_MSG 64

struct perf_record_auxtrace_error {
	struct perf_event_header header;
	__u32 type;
	__u32 code;
	__u32 cpu;
	__u32 pid;
	__u32 tid;
	__u32 fmt;
	__u64 ip;
	__u64 time;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	__u32 machine_pid;
	__u32 vcpu;
};

struct perf_record_aux {
	struct perf_event_header header;
	__u64 aux_offset;
	__u64 aux_size;
	__u64 flags;
};

struct perf_record_itrace_start {
	struct perf_event_header header;
	__u32 pid;
	__u32 tid;
};

struct perf_record_aux_output_hw_id {
	struct perf_event_header header;
	__u64 hw_id;
};

struct perf_record_thread_map_entry {
	__u64 pid;
	char comm[16];
};

struct perf_record_thread_map {
	struct perf_event_header header;
	__u64 nr;
	struct perf_record_thread_map_entry entries[];
};

enum {
	PERF_STAT_CONFIG_TERM__AGGR_MODE = 0,
	PERF_STAT_CONFIG_TERM__INTERVAL = 1,
	PERF_STAT_CONFIG_TERM__SCALE = 2,
	PERF_STAT_CONFIG_TERM__AGGR_LEVEL = 3,
	PERF_STAT_CONFIG_TERM__MAX = 4,
};

struct perf_record_stat_config_entry {
	__u64 tag;
	__u64 val;
};

struct perf_record_stat_config {
	struct perf_event_header header;
	__u64 nr;
	struct perf_record_stat_config_entry data[];
};

struct perf_record_stat {
	struct perf_event_header header;

	__u64 id;
	__u32 cpu;
	__u32 thread;

	union {
		struct {
			__u64 val;
			__u64 ena;
			__u64 run;
		};
		__u64 values[3];
	};
};

struct perf_record_stat_round {
	struct perf_event_header header;
	__u64 type;
	__u64 time;
};

struct perf_record_time_conv {
	struct perf_event_header header;
	__u64 time_shift;
	__u64 time_mult;
	__u64 time_zero;
	__u64 time_cycles;
	__u64 time_mask;
	__u8 cap_user_time_zero;
	__u8 cap_user_time_short;
	__u8 reserved[6]; /* For alignment */
};
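/*
 * Sketch: PERF_RECORD_TIME_CONV records written by older tools may be shorter
 * than this struct, so a reader can use the event_contains() macro defined
 * near the top of this file to check header.size before touching the trailing
 * members, e.g.:
 *
 *	struct perf_record_time_conv *tc = &event->time_conv;
 *
 *	if (event_contains(*tc, time_cycles)) {
 *		// time_cycles, time_mask and the cap_user_time_* flags are valid
 *	}
 */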
struct perf_record_header_feature {
	struct perf_event_header header;
	__u64 feat_id;
	char data[];
};

struct perf_record_compressed {
	struct perf_event_header header;
	char data[];
};

/*
 * `header.size` includes the padding we are going to add while writing the record.
 * `data_size` only includes the size of `data[]` itself.
 */
struct perf_record_compressed2 {
	struct perf_event_header header;
	__u64 data_size;
	char data[];
};

#define BPF_METADATA_KEY_LEN 64
#define BPF_METADATA_VALUE_LEN 256
#define BPF_PROG_NAME_LEN KSYM_NAME_LEN

struct perf_record_bpf_metadata_entry {
	char key[BPF_METADATA_KEY_LEN];
	char value[BPF_METADATA_VALUE_LEN];
};

struct perf_record_bpf_metadata {
	struct perf_event_header header;
	char prog_name[BPF_PROG_NAME_LEN];
	__u64 nr_entries;
	struct perf_record_bpf_metadata_entry entries[];
};

enum perf_user_event_type { /* above any possible kernel type */
	PERF_RECORD_USER_TYPE_START = 64,
	PERF_RECORD_HEADER_ATTR = 64,
	PERF_RECORD_HEADER_EVENT_TYPE = 65, /* deprecated */
	PERF_RECORD_HEADER_TRACING_DATA = 66,
	PERF_RECORD_HEADER_BUILD_ID = 67,
	PERF_RECORD_FINISHED_ROUND = 68,
	PERF_RECORD_ID_INDEX = 69,
	PERF_RECORD_AUXTRACE_INFO = 70,
	PERF_RECORD_AUXTRACE = 71,
	PERF_RECORD_AUXTRACE_ERROR = 72,
	PERF_RECORD_THREAD_MAP = 73,
	PERF_RECORD_CPU_MAP = 74,
	PERF_RECORD_STAT_CONFIG = 75,
	PERF_RECORD_STAT = 76,
	PERF_RECORD_STAT_ROUND = 77,
	PERF_RECORD_EVENT_UPDATE = 78,
	PERF_RECORD_TIME_CONV = 79,
	PERF_RECORD_HEADER_FEATURE = 80,
	PERF_RECORD_COMPRESSED = 81,
	PERF_RECORD_FINISHED_INIT = 82,
	PERF_RECORD_COMPRESSED2 = 83,
	PERF_RECORD_BPF_METADATA = 84,
	PERF_RECORD_HEADER_MAX
};

union perf_event {
	struct perf_event_header header;
	struct perf_record_mmap mmap;
	struct perf_record_mmap2 mmap2;
	struct perf_record_comm comm;
	struct perf_record_namespaces namespaces;
	struct perf_record_cgroup cgroup;
	struct perf_record_fork fork;
	struct perf_record_lost lost;
	struct perf_record_lost_samples lost_samples;
	struct perf_record_read read;
	struct perf_record_throttle throttle;
	struct perf_record_sample sample;
	struct perf_record_bpf_event bpf;
	struct perf_record_ksymbol ksymbol;
	struct perf_record_text_poke_event text_poke;
	struct perf_record_header_attr attr;
	struct perf_record_event_update event_update;
	struct perf_record_header_event_type event_type;
	struct perf_record_header_tracing_data tracing_data;
	struct perf_record_header_build_id build_id;
	struct perf_record_id_index id_index;
	struct perf_record_auxtrace_info auxtrace_info;
	struct perf_record_auxtrace auxtrace;
	struct perf_record_auxtrace_error auxtrace_error;
	struct perf_record_aux aux;
	struct perf_record_itrace_start itrace_start;
	struct perf_record_aux_output_hw_id aux_output_hw_id;
	struct perf_record_switch context_switch;
	struct perf_record_thread_map thread_map;
	struct perf_record_cpu_map cpu_map;
	struct perf_record_stat_config stat_config;
	struct perf_record_stat stat;
	struct perf_record_stat_round stat_round;
	struct perf_record_time_conv time_conv;
	struct perf_record_header_feature feat;
	struct perf_record_compressed pack;
	struct perf_record_compressed2 pack2;
	struct perf_record_bpf_metadata bpf_metadata;
};

#endif /* __LIBPERF_EVENT_H */
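/*
 * Reader-side sketch (illustrative only, not part of the API): records are
 * normally dispatched by header.type, with the matching union member giving
 * the typed view; types at or above PERF_RECORD_USER_TYPE_START are written
 * by user space (e.g. into perf.data) rather than generated by the kernel.
 *
 *	switch (event->header.type) {
 *	case PERF_RECORD_MMAP2:
 *		// kernel record: event->mmap2
 *		break;
 *	case PERF_RECORD_HEADER_ATTR:
 *		// user-space record: event->attr
 *		break;
 *	default:
 *		break;
 *	}
 */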