xref: /linux/tools/perf/util/auxtrace.h (revision d465bff130bf4ca17b6980abe51164ace1e0cba4)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * auxtrace.h: AUX area trace support
4  * Copyright (c) 2013-2015, Intel Corporation.
5  */
6 
7 #ifndef __PERF_AUXTRACE_H
8 #define __PERF_AUXTRACE_H
9 
10 #include <sys/types.h>
11 #include <errno.h>
12 #include <stdbool.h>
13 #include <stddef.h>
14 #include <stdio.h> // FILE
15 #include <linux/list.h>
16 #include <linux/perf_event.h>
17 #include <linux/types.h>
18 #include <internal/cpumap.h>
19 #include <asm/bitsperlong.h>
20 #include <asm/barrier.h>
21 
22 union perf_event;
23 struct perf_session;
24 struct evlist;
25 struct evsel;
26 struct perf_tool;
27 struct mmap;
28 struct perf_sample;
29 struct option;
30 struct record_opts;
31 struct perf_record_auxtrace_error;
32 struct perf_record_auxtrace_info;
33 struct events_stats;
34 struct perf_pmu;
35 
/*
 * AUX area tracing error types, carried in PERF_RECORD_AUXTRACE_ERROR events.
 * Numbering starts at 1; PERF_AUXTRACE_ERROR_MAX is one past the last type.
 */
enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE  = 1,
	PERF_AUXTRACE_ERROR_MAX
};
40 
41 /* Auxtrace records must have the same alignment as perf event records */
42 #define PERF_AUXTRACE_RECORD_ALIGNMENT 8
43 
/* Hardware trace sources whose AUX area data perf knows how to decode */
enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};
52 
/* Unit of the 'instructions' events synthesis period (see itrace_synth_opts) */
enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};
58 
59 #define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
60 #define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))
61 
62 #define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))
63 #define AUXTRACE_LOG_FLG_ON_ERROR	(1 << ('e' - 'a'))
64 #define AUXTRACE_LOG_FLG_USE_STDOUT	(1 << ('o' - 'a'))
65 
/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 *            (branch misses only for Arm SPE)
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @intr_events: whether to synthesize interrupt events
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @add_callchain: add callchain to existing event records
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instructions' events
 * @add_last_branch: add branch context to existing event records
 * @approx_ipc: approximate IPC
 * @flc: whether to synthesize first level cache events
 * @llc: whether to synthesize last level cache events
 * @tlb: whether to synthesize TLB events
 * @remote_access: whether to synthesize remote access events
 * @mem: whether to synthesize memory events
 * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
 * @vm_time_correlation: perform VM Time Correlation
 * @vm_tm_corr_dry_run: VM Time Correlation dry-run
 * @vm_tm_corr_args: VM Time Correlation implementation-specific arguments
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 * @error_plus_flags: flags to affect what errors are reported
 * @error_minus_flags: flags to affect what errors are reported
 * @log_plus_flags: flags to affect what is logged
 * @log_minus_flags: flags to affect what is logged
 * @quick: quicker (less detailed) decoding
 * @log_on_error_size: size of log to keep for outputting log only on errors
 */
struct itrace_synth_opts {
	bool			set;
	bool			default_no_sample;
	bool			inject;
	bool			instructions;
	bool			branches;
	bool			transactions;
	bool			ptwrites;
	bool			pwr_events;
	bool			other_events;
	bool			intr_events;
	bool			errors;
	bool			dont_decode;
	bool			log;
	bool			calls;
	bool			returns;
	bool			callchain;
	bool			add_callchain;
	bool			thread_stack;
	bool			last_branch;
	bool			add_last_branch;
	bool			approx_ipc;
	bool			flc;
	bool			llc;
	bool			tlb;
	bool			remote_access;
	bool			mem;
	bool			timeless_decoding;
	bool			vm_time_correlation;
	bool			vm_tm_corr_dry_run;
	char			*vm_tm_corr_args;
	unsigned int		callchain_sz;
	unsigned int		last_branch_sz;
	unsigned long long	period;
	enum itrace_period_type	period_type;
	unsigned long		initial_skip;
	unsigned long		*cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int			range_num;
	unsigned int		error_plus_flags;
	unsigned int		error_minus_flags;
	unsigned int		log_plus_flags;
	unsigned int		log_minus_flags;
	unsigned int		quick;
	unsigned int		log_on_error_size;
};
162 
/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64			file_offset;
	u64			sz;
};
173 
174 #define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256
175 
/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries (fixed capacity %PERF_AUXTRACE_INDEX_ENTRY_COUNT;
 *           further arrays are chained via @list)
 */
struct auxtrace_index {
	struct list_head	list;
	size_t			nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};
188 
/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 * @evsel_is_auxtrace: return whether the given evsel is an AUX area tracing
 *                     event
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*queue_data)(struct perf_session *session,
			  struct perf_sample *sample, union perf_event *event,
			  u64 data_offset);
	void (*dump_auxtrace_sample)(struct perf_session *session,
				     struct perf_sample *sample);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
	bool (*evsel_is_auxtrace)(struct perf_session *session,
				  struct evsel *evsel);
};
220 
/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer (cf. auxtrace_queues.next_buffer_nr)
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head	list;
	size_t			size;
	pid_t			pid;
	pid_t			tid;
	struct perf_cpu		cpu;
	void			*data;
	off_t			data_offset;
	void			*mmap_addr;
	size_t			mmap_size;
	bool			data_needs_freeing;
	bool			consecutive;
	u64			offset;
	u64			reference;
	u64			buffer_nr;
	size_t			use_size;
	void			*use_data;
};
262 
/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list (struct auxtrace_buffer linked via its @list
 *        member)
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head	head;
	pid_t			tid;
	int			cpu;
	bool			set;
	void			*priv;
};
278 
/**
 * struct auxtrace_queues - an array of AUX area tracing queues, one per cpu
 *                          (per-cpu mode) or per thread (per-thread mode) -
 *                          see struct auxtrace_queue.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue	*queue_array;
	unsigned int		nr_queues;
	bool			new_data;
	bool			populated;
	u64			next_buffer_nr;
};
294 
/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number (cf. struct auxtrace_queues)
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int		queue_nr;
	u64			ordinal;
};
305 
/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap (min-heap: lowest @ordinal at the top, see
 *              struct auxtrace_heap_item)
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item	*heap_array;
	unsigned int		heap_cnt;
	unsigned int		heap_sz;
};
317 
/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page (holds the aux_head /
 *          aux_tail indices used by auxtrace_mmap__read_head() and
 *          auxtrace_mmap__write_tail())
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void		*base;
	void		*userpg;
	size_t		mask;
	size_t		len;
	u64		prev;
	int		idx;
	pid_t		tid;
	int		cpu;
};
340 
/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap
 *                               (see auxtrace_mmap_params__init() and
 *                               auxtrace_mmap__mmap()).
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @mmap_needed: set to %false for non-auxtrace events. This is needed because
 *               auxtrace mmapping is done in the same code path as non-auxtrace
 *               mmapping but not every evsel that needs non-auxtrace mmapping
 *               also needs auxtrace mmapping.
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t		mask;
	off_t		offset;
	size_t		len;
	int		prot;
	int		idx;
	pid_t		tid;
	bool		mmap_needed;
	struct perf_cpu	cpu;
};
366 
/**
 * struct auxtrace_record - callbacks for recording AUX area data
 *                          (see auxtrace_record__init()).
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for --aux sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
	unsigned int default_aux_sample_size;
	struct perf_pmu *pmu;
	struct evlist *evlist;
};
410 
/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter (@addr to @addr + @size)
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head	list;
	bool			range;
	bool			start;
	const char		*action;
	const char		*sym_from;
	const char		*sym_to;
	int			sym_from_idx;
	int			sym_to_idx;
	u64			addr;
	u64			size;
	const char		*filename;
	char			*str;
};
442 
/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters (struct addr_filter linked via its @list
 *        node)
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head	head;
	int			cnt;
};
452 
453 struct auxtrace_cache;
454 
455 #ifdef HAVE_AUXTRACE_SUPPORT
456 
457 u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm);
458 int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail);
459 
/*
 * Read the AUX area write head (aux_head) from the perf_event mmap page.
 * On a 32-bit perf running against a 64-bit kernel the plain load below is
 * not used - the compat helper handles that case instead (presumably because
 * a 64-bit value cannot be loaded atomically there - see
 * compat_auxtrace_mmap__read_head()).
 */
static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm,
					   int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__read_head(mm);
#endif
	head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	smp_rmb();
	return head;
}
476 
/*
 * Publish the AUX area read tail (aux_tail) to the perf_event mmap page,
 * telling the kernel the data up to @tail has been consumed. The 32-bit-perf /
 * 64-bit-kernel combination is delegated to the compat helper (see
 * compat_auxtrace_mmap__write_tail()). Returns 0 on success.
 */
static inline int auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail,
					    int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__write_tail(mm, tail);
#endif
	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}
491 
492 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
493 			struct auxtrace_mmap_params *mp,
494 			void *userpg, int fd);
495 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
496 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
497 				off_t auxtrace_offset,
498 				unsigned int auxtrace_pages,
499 				bool auxtrace_overwrite);
500 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
501 				   struct evlist *evlist,
502 				   struct evsel *evsel, int idx);
503 
504 typedef int (*process_auxtrace_t)(struct perf_tool *tool,
505 				  struct mmap *map,
506 				  union perf_event *event, void *data1,
507 				  size_t len1, void *data2, size_t len2);
508 
509 int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
510 			struct perf_tool *tool, process_auxtrace_t fn);
511 
512 int auxtrace_mmap__read_snapshot(struct mmap *map,
513 				 struct auxtrace_record *itr,
514 				 struct perf_tool *tool, process_auxtrace_t fn,
515 				 size_t snapshot_size);
516 
517 int auxtrace_queues__init(struct auxtrace_queues *queues);
518 int auxtrace_queues__add_event(struct auxtrace_queues *queues,
519 			       struct perf_session *session,
520 			       union perf_event *event, off_t data_offset,
521 			       struct auxtrace_buffer **buffer_ptr);
522 struct auxtrace_queue *
523 auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
524 			      struct perf_sample *sample,
525 			      struct perf_session *session);
526 int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
527 				struct perf_session *session,
528 				struct perf_sample *sample, u64 data_offset,
529 				u64 reference);
530 void auxtrace_queues__free(struct auxtrace_queues *queues);
531 int auxtrace_queues__process_index(struct auxtrace_queues *queues,
532 				   struct perf_session *session);
533 int auxtrace_queue_data(struct perf_session *session, bool samples,
534 			bool events);
535 struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
536 					      struct auxtrace_buffer *buffer);
537 void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);
/* Read-only convenience wrapper around auxtrace_buffer__get_data_rw() */
static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	return auxtrace_buffer__get_data_rw(buffer, fd, false);
}
542 void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
543 void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
544 void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
545 
546 int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
547 		       u64 ordinal);
548 void auxtrace_heap__pop(struct auxtrace_heap *heap);
549 void auxtrace_heap__free(struct auxtrace_heap *heap);
550 
/**
 * struct auxtrace_cache_entry - entry of an auxtrace cache.
 * @hash: hash chain node
 * @key: key identifying the entry (see auxtrace_cache__add() and
 *       auxtrace_cache__lookup())
 */
struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};
555 
556 struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
557 					   unsigned int limit_percent);
558 void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
559 void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
560 void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
561 int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
562 			struct auxtrace_cache_entry *entry);
563 void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
564 void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
565 
566 struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
567 					      int *err);
568 
569 int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
570 				    struct record_opts *opts,
571 				    const char *str);
572 int auxtrace_parse_sample_options(struct auxtrace_record *itr,
573 				  struct evlist *evlist,
574 				  struct record_opts *opts, const char *str);
575 void auxtrace_regroup_aux_output(struct evlist *evlist);
576 int auxtrace_record__options(struct auxtrace_record *itr,
577 			     struct evlist *evlist,
578 			     struct record_opts *opts);
579 size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
580 				       struct evlist *evlist);
581 int auxtrace_record__info_fill(struct auxtrace_record *itr,
582 			       struct perf_session *session,
583 			       struct perf_record_auxtrace_info *auxtrace_info,
584 			       size_t priv_size);
585 void auxtrace_record__free(struct auxtrace_record *itr);
586 int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
587 int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
588 int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
589 				   struct auxtrace_mmap *mm,
590 				   unsigned char *data, u64 *head, u64 *old);
591 u64 auxtrace_record__reference(struct auxtrace_record *itr);
592 int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);
593 
594 int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
595 				   off_t file_offset);
596 int auxtrace_index__write(int fd, struct list_head *head);
597 int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
598 			    bool needs_swap);
599 void auxtrace_index__free(struct list_head *head);
600 
601 void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
602 				int code, int cpu, pid_t pid, pid_t tid, u64 ip,
603 				const char *msg, u64 timestamp,
604 				pid_t machine_pid, int vcpu);
605 void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
606 			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
607 			  const char *msg, u64 timestamp);
608 
609 int perf_event__process_auxtrace_info(struct perf_session *session,
610 				      union perf_event *event);
611 s64 perf_event__process_auxtrace(struct perf_session *session,
612 				 union perf_event *event);
613 int perf_event__process_auxtrace_error(struct perf_session *session,
614 				       union perf_event *event);
615 int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
616 			       const char *str, int unset);
617 int itrace_parse_synth_opts(const struct option *opt, const char *str,
618 			    int unset);
619 void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
620 				    bool no_sample);
621 
622 size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
623 void perf_session__auxtrace_error_inc(struct perf_session *session,
624 				      union perf_event *event);
625 void events_stats__auxtrace_error_warn(const struct events_stats *stats);
626 
627 void addr_filters__init(struct addr_filters *filts);
628 void addr_filters__exit(struct addr_filters *filts);
629 int addr_filters__parse_bare_filter(struct addr_filters *filts,
630 				    const char *filter);
631 int auxtrace_parse_filters(struct evlist *evlist);
632 
633 int auxtrace__process_event(struct perf_session *session, union perf_event *event,
634 			    struct perf_sample *sample, struct perf_tool *tool);
635 void auxtrace__dump_auxtrace_sample(struct perf_session *session,
636 				    struct perf_sample *sample);
637 int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
638 void auxtrace__free_events(struct perf_session *session);
639 void auxtrace__free(struct perf_session *session);
640 bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
641 				 struct evsel *evsel);
642 
643 #define ITRACE_HELP \
644 "				i[period]:    		synthesize instructions events\n" \
645 "				b:	    		synthesize branches events (branch misses for Arm SPE)\n" \
646 "				c:	    		synthesize branches events (calls only)\n"	\
647 "				r:	    		synthesize branches events (returns only)\n" \
648 "				x:	    		synthesize transactions events\n"		\
649 "				w:	    		synthesize ptwrite events\n"		\
650 "				p:	    		synthesize power events\n"			\
651 "				o:			synthesize other events recorded due to the use\n" \
652 "							of aux-output (refer to perf record)\n"	\
653 "				I:			synthesize interrupt or similar (asynchronous) events\n" \
654 "							(e.g. Intel PT Event Trace)\n" \
655 "				e[flags]:		synthesize error events\n" \
656 "							each flag must be preceded by + or -\n" \
657 "							error flags are: o (overflow)\n" \
658 "									 l (data lost)\n" \
659 "				d[flags]:		create a debug log\n" \
660 "							each flag must be preceded by + or -\n" \
661 "							log flags are: a (all perf events)\n" \
662 "							               o (output to stdout)\n" \
663 "				f:	    		synthesize first level cache events\n" \
664 "				m:	    		synthesize last level cache events\n" \
665 "				t:	    		synthesize TLB events\n" \
666 "				a:	    		synthesize remote access events\n" \
667 "				g[len]:     		synthesize a call chain (use with i or x)\n" \
668 "				G[len]:			synthesize a call chain on existing event records\n" \
669 "				l[len]:     		synthesize last branch entries (use with i or x)\n" \
670 "				L[len]:			synthesize last branch entries on existing event records\n" \
671 "				sNUMBER:    		skip initial number of events\n"		\
672 "				q:			quicker (less detailed) decoding\n" \
673 "				A:			approximate IPC\n" \
674 "				Z:			prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \
675 "				PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
676 "				concatenate multiple options. Default is ibxwpe or cewp\n"
677 
/*
 * Attach time intervals to the itrace options. Only the pointer is stored -
 * the array is not copied, so it must outlive @opts.
 */
static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
				       struct perf_time_interval *ptime_range,
				       int range_num)
{
	opts->ptime_range = ptime_range;
	opts->range_num = range_num;
}
686 
/* Drop the reference to the time intervals (the array itself is not freed) */
static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
	opts->ptime_range = NULL;
	opts->range_num = 0;
}
693 
694 #else
695 #include "debug.h"
696 
/*
 * Stubs for builds without HAVE_AUXTRACE_SUPPORT: recording set-up silently
 * succeeds with no auxtrace record (*err = 0, NULL itr).
 */
static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}
717 
/*
 * Without auxtrace support, auxtrace events encountered in a perf.data file
 * are silently ignored (processed as success).
 */
static inline
int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
				      union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
				 union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
				       union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}
752 
/*
 * Option-parsing stubs: explicitly requesting itrace/snapshot/sample options
 * is an error (-EINVAL) when auxtrace support is compiled out, but an absent
 * option string is accepted.
 */
static inline
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
			       const char *str __maybe_unused, int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
				  struct evlist *evlist __maybe_unused,
				  struct record_opts *opts __maybe_unused,
				  const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
{
}
797 
/* Session decode hooks become no-ops without auxtrace support */
static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
				    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}
834 
/*
 * Index stubs: reading or writing an auxtrace index is impossible without
 * support (-EINVAL); freeing is a no-op; evsel queries report no auxtrace;
 * filter parsing succeeds vacuously.
 */
static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
				 struct evsel *evsel __maybe_unused)
{
	return false;
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}
868 
869 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
870 			struct auxtrace_mmap_params *mp,
871 			void *userpg, int fd);
872 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
873 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
874 				off_t auxtrace_offset,
875 				unsigned int auxtrace_pages,
876 				bool auxtrace_overwrite);
877 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
878 				   struct evlist *evlist,
879 				   struct evsel *evsel, int idx);
880 
881 #define ITRACE_HELP ""
882 
/* Time-range helpers are no-ops without auxtrace support */
static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}
897 
898 #endif
899 
900 #endif
901