/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

union perf_event;
struct perf_session;
struct evlist;
struct evsel;
struct perf_tool;
struct mmap;
struct perf_sample;
struct option;
struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
struct perf_pmu;

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};

#define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
#define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))

#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))

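/*
 * Note (illustrative, not part of the original header): the flag macros above
 * map an --itrace flag letter to a bit position by subtracting 'a'.  For
 * example, 'o' - 'a' == 14, so AUXTRACE_ERR_FLG_OVERFLOW is (1 << 14), and
 * 'a' - 'a' == 0, so AUXTRACE_LOG_FLG_ALL_PERF_EVTS is bit 0.  The letters
 * match the error ('o', 'l') and log ('a') flag characters documented in
 * ITRACE_HELP below.
 */
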
/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 *            (branch misses only for Arm SPE)
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @add_callchain: add callchain to existing event records
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instruction' events
 * @add_last_branch: add branch context to existing event records
 * @flc: whether to synthesize first level cache events
 * @llc: whether to synthesize last level cache events
 * @tlb: whether to synthesize TLB events
 * @remote_access: whether to synthesize remote access events
 * @mem: whether to synthesize memory events
 * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
 * @vm_time_correlation: perform VM Time Correlation
 * @vm_tm_corr_dry_run: VM Time Correlation dry-run
 * @vm_tm_corr_args: VM Time Correlation implementation-specific arguments
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 * @error_plus_flags: flags to affect what errors are reported
 * @error_minus_flags: flags to affect what errors are reported
 * @log_plus_flags: flags to affect what is logged
 * @log_minus_flags: flags to affect what is logged
 * @quick: quicker (less detailed) decoding
 */
struct itrace_synth_opts {
	bool			set;
	bool			default_no_sample;
	bool			inject;
	bool			instructions;
	bool			branches;
	bool			transactions;
	bool			ptwrites;
	bool			pwr_events;
	bool			other_events;
	bool			errors;
	bool			dont_decode;
	bool			log;
	bool			calls;
	bool			returns;
	bool			callchain;
	bool			add_callchain;
	bool			thread_stack;
	bool			last_branch;
	bool			add_last_branch;
	bool			flc;
	bool			llc;
	bool			tlb;
	bool			remote_access;
	bool			mem;
	bool			timeless_decoding;
	bool			vm_time_correlation;
	bool			vm_tm_corr_dry_run;
	char			*vm_tm_corr_args;
	unsigned int		callchain_sz;
	unsigned int		last_branch_sz;
	unsigned long long	period;
	enum itrace_period_type	period_type;
	unsigned long		initial_skip;
	unsigned long		*cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int			range_num;
	unsigned int		error_plus_flags;
	unsigned int		error_minus_flags;
	unsigned int		log_plus_flags;
	unsigned int		log_minus_flags;
	unsigned int		quick;
};

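/*
 * Illustrative sketch (not part of the original header): a consumer would
 * typically start from the defaults and then adjust individual fields, e.g.:
 *
 *	struct itrace_synth_opts opts = { 0 };
 *
 *	itrace_synth_opts__set_default(&opts, false);
 *	opts.callchain = true;
 *	opts.callchain_sz = 16;
 *
 * itrace_synth_opts__set_default() is declared later in this header; the
 * field values above are only an example.
 */
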
/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64			file_offset;
	u64			sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head	list;
	size_t			nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 * @evsel_is_auxtrace: return whether the given evsel is an AUX area tracing
 *                     event
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*queue_data)(struct perf_session *session,
			  struct perf_sample *sample, union perf_event *event,
			  u64 data_offset);
	void (*dump_auxtrace_sample)(struct perf_session *session,
				     struct perf_sample *sample);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
	bool (*evsel_is_auxtrace)(struct perf_session *session,
				  struct evsel *evsel);
};

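/*
 * Illustrative sketch (not part of the original header): a decoder embeds a
 * struct auxtrace in its own state and points the callbacks at its handlers.
 * The my_decoder_* names below are hypothetical, and attaching the struct to
 * the session is assumed to go through the session's auxtrace pointer:
 *
 *	struct my_decoder {
 *		struct auxtrace auxtrace;
 *		...
 *	};
 *
 *	md->auxtrace.process_event          = my_decoder__process_event;
 *	md->auxtrace.process_auxtrace_event = my_decoder__process_auxtrace_event;
 *	md->auxtrace.flush_events           = my_decoder__flush_events;
 *	md->auxtrace.free_events            = my_decoder__free_events;
 *	md->auxtrace.free                   = my_decoder__free;
 *	session->auxtrace                   = &md->auxtrace;
 */
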
/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head	list;
	size_t			size;
	pid_t			pid;
	pid_t			tid;
	int			cpu;
	void			*data;
	off_t			data_offset;
	void			*mmap_addr;
	size_t			mmap_size;
	bool			data_needs_freeing;
	bool			consecutive;
	u64			offset;
	u64			reference;
	u64			buffer_nr;
	size_t			use_size;
	void			*use_data;
};

/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head	head;
	pid_t			tid;
	int			cpu;
	bool			set;
	void			*priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue	*queue_array;
	unsigned int		nr_queues;
	bool			new_data;
	bool			populated;
	u64			next_buffer_nr;
};

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int		queue_nr;
	u64			ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item	*heap_array;
	unsigned int			heap_cnt;
	unsigned int			heap_sz;
};

/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void		*base;
	void		*userpg;
	size_t		mask;
	size_t		len;
	u64		prev;
	int		idx;
	pid_t		tid;
	int		cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t		mask;
	off_t		offset;
	size_t		len;
	int		prot;
	int		idx;
	pid_t		tid;
	int		cpu;
};

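/*
 * Illustrative sketch (not part of the original header): the usual way to
 * drain an AUX area ring buffer is to read the head, copy out the new data
 * (wrapping at mm->len), record the new position, then publish it as the
 * tail:
 *
 *	u64 head = auxtrace_mmap__read_head(mm);
 *	u64 old  = mm->prev;
 *
 *	if (head != old) {
 *		... copy data between old and head, wrapping at mm->len ...
 *		mm->prev = head;
 *		auxtrace_mmap__write_tail(mm, head);
 *	}
 *
 * The required memory barriers are inside auxtrace_mmap__read_head() and
 * auxtrace_mmap__write_tail(), which are defined below.
 */
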
/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for the --aux-sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
	unsigned int default_aux_sample_size;
	struct perf_pmu *pmu;
	struct evlist *evlist;
};

/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head	list;
	bool			range;
	bool			start;
	const char		*action;
	const char		*sym_from;
	const char		*sym_to;
	int			sym_from_idx;
	int			sym_to_idx;
	u64			addr;
	u64			size;
	const char		*filename;
	char			*str;
};

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head	head;
	int			cnt;
};

struct auxtrace_cache;

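/*
 * Illustrative sketch (not part of the original header): a bare filter string
 * can be parsed into a list of address filters, and cleaned up afterwards,
 * with the helpers declared later in this header:
 *
 *	struct addr_filters filts;
 *	int err;
 *
 *	addr_filters__init(&filts);
 *	err = addr_filters__parse_bare_filter(&filts, str);
 *	...
 *	addr_filters__exit(&filts);
 */
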
#ifdef HAVE_AUXTRACE_SUPPORT

/*
 * In snapshot mode the mmapped page is read-only which makes using
 * __sync_val_compare_and_swap() problematic. However, snapshot mode expects
 * the buffer is not updated while the snapshot is made (e.g. Intel PT disables
 * the event) so there is not a race anyway.
 */
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	smp_rmb();
	return head;
}

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 head = READ_ONCE(pc->aux_head);
#else
	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif

	/* Ensure all reads are done after we read the head */
	smp_rmb();
	return head;
}

static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 old_tail;
#endif

	/* Ensure all reads are done before we write the tail out */
	smp_mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pc->aux_tail = tail;
#else
	do {
		old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
	} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
struct auxtrace_queue *
auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
			      struct perf_sample *sample,
			      struct perf_session *session);
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
int auxtrace_queue_data(struct perf_session *session, bool samples,
			bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);
static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	return auxtrace_buffer__get_data_rw(buffer, fd, false);
}
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);

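/*
 * Illustrative sketch (not part of the original header): buffers on a queue
 * are typically walked with auxtrace_buffer__next(), loading and releasing
 * the data around each decode step:
 *
 *	struct auxtrace_buffer *buffer = NULL;
 *
 *	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *		void *data = auxtrace_buffer__get_data(buffer, fd);
 *
 *		if (!data)
 *			break;
 *		... decode data ...
 *		auxtrace_buffer__put_data(buffer);
 *	}
 */
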
int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);

struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);

struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
					      int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str);
void auxtrace_regroup_aux_output(struct evlist *evlist);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

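/*
 * Illustrative sketch (not part of the original header): the heap declared
 * above is typically used to process queues in timestamp order, adding each
 * queue with the timestamp of its next data and always taking the lowest
 * ordinal from the top of the heap:
 *
 *	struct auxtrace_heap heap = { 0 };
 *
 *	auxtrace_heap__add(&heap, queue_nr, timestamp);
 *	...
 *	while (heap.heap_cnt) {
 *		unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *
 *		auxtrace_heap__pop(&heap);
 *		... decode queue queue_nr up to some timestamp, then ...
 *		auxtrace_heap__add(&heap, queue_nr, next_timestamp);
 *	}
 */
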
size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel);

#define ITRACE_HELP \
"	i[period]: synthesize instructions events\n" \
"	b: synthesize branches events (branch misses for Arm SPE)\n" \
"	c: synthesize branches events (calls only)\n" \
"	r: synthesize branches events (returns only)\n" \
"	x: synthesize transactions events\n" \
"	w: synthesize ptwrite events\n" \
"	p: synthesize power events\n" \
"	o: synthesize other events recorded due to the use\n" \
"	   of aux-output (refer to perf record)\n" \
"	e[flags]: synthesize error events\n" \
"	   each flag must be preceded by + or -\n" \
"	   error flags are: o (overflow)\n" \
"	                    l (data lost)\n" \
"	d[flags]: create a debug log\n" \
"	   each flag must be preceded by + or -\n" \
"	   log flags are: a (all perf events)\n" \
"	f: synthesize first level cache events\n" \
"	m: synthesize last level cache events\n" \
"	t: synthesize TLB events\n" \
"	a: synthesize remote access events\n" \
"	g[len]: synthesize a call chain (use with i or x)\n" \
"	G[len]: synthesize a call chain on existing event records\n" \
"	l[len]: synthesize last branch entries (use with i or x)\n" \
"	L[len]: synthesize last branch entries on existing event records\n" \
"	sNUMBER: skip initial number of events\n" \
"	q: quicker (less detailed) decoding\n" \
"	PERIOD[ns|us|ms|i|t]: specify period to sample stream\n" \
"	concatenate multiple options. Default is ibxwpe or cewp\n"

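/*
 * Illustrative example (not from the original header): following the help
 * text above, an option string is built by concatenating flags, e.g.
 * "--itrace=i100usg" would ask for 'instructions' events every 100
 * microseconds with call chains, assuming the usual --itrace option parsing
 * in the perf tools.
 */
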
static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
				       struct perf_time_interval *ptime_range,
				       int range_num)
{
	opts->ptime_range = ptime_range;
	opts->range_num = range_num;
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
	opts->ptime_range = NULL;
	opts->range_num = 0;
}

#else
#include "debug.h"

static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
				      union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
				 union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
				       union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

static inline
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
			       const char *str __maybe_unused, int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
				  struct evlist *evlist __maybe_unused,
				  struct record_opts *opts __maybe_unused,
				  const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
{
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
				    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
				 struct evsel *evsel __maybe_unused)
{
	return false;
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}

#endif

#endif