xref: /linux/tools/perf/util/auxtrace.h (revision ec714e371f22f716a04e6ecb2a24988c92b26911)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * auxtrace.h: AUX area trace support
4  * Copyright (c) 2013-2015, Intel Corporation.
5  */
6 
7 #ifndef __PERF_AUXTRACE_H
8 #define __PERF_AUXTRACE_H
9 
10 #include <sys/types.h>
11 #include <errno.h>
12 #include <stdbool.h>
13 #include <stddef.h>
14 #include <stdio.h> // FILE
15 #include <linux/list.h>
16 #include <linux/perf_event.h>
17 #include <linux/types.h>
18 #include <perf/cpumap.h>
19 #include <asm/bitsperlong.h>
20 #include <asm/barrier.h>
21 
22 union perf_event;
23 struct perf_session;
24 struct evlist;
25 struct evsel;
26 struct perf_env;
27 struct perf_tool;
28 struct mmap;
29 struct perf_sample;
30 struct option;
31 struct record_opts;
32 struct perf_record_auxtrace_error;
33 struct perf_record_auxtrace_info;
34 struct events_stats;
35 struct perf_pmu;
36 
37 enum auxtrace_error_type {
38        PERF_AUXTRACE_ERROR_ITRACE  = 1,
39        PERF_AUXTRACE_ERROR_MAX
40 };
41 
42 /* Auxtrace records must have the same alignment as perf event records */
43 #define PERF_AUXTRACE_RECORD_ALIGNMENT 8
44 
45 enum auxtrace_type {
46 	PERF_AUXTRACE_UNKNOWN,
47 	PERF_AUXTRACE_INTEL_PT,
48 	PERF_AUXTRACE_INTEL_BTS,
49 	PERF_AUXTRACE_CS_ETM,
50 	PERF_AUXTRACE_ARM_SPE,
51 	PERF_AUXTRACE_S390_CPUMSF,
52 	PERF_AUXTRACE_HISI_PTT,
53 	PERF_AUXTRACE_VPA_DTL,
54 };
55 
56 enum itrace_period_type {
57 	PERF_ITRACE_PERIOD_INSTRUCTIONS,
58 	PERF_ITRACE_PERIOD_TICKS,
59 	PERF_ITRACE_PERIOD_NANOSECS,
60 };
61 
62 #define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
63 #define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))
64 
65 #define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))
66 #define AUXTRACE_LOG_FLG_ON_ERROR	(1 << ('e' - 'a'))
67 #define AUXTRACE_LOG_FLG_USE_STDOUT	(1 << ('o' - 'a'))
68 
69 /**
70  * struct itrace_synth_opts - AUX area tracing synthesis options.
71  * @set: indicates whether or not options have been set
72  * @default_no_sample: Default to no sampling.
73  * @inject: indicates the event (not just the sample) must be fully synthesized
74  *          because 'perf inject' will write it out
75  * @instructions: whether to synthesize 'instructions' events
76  * @cycles: whether to synthesize 'cycles' events
77  *          (not fully accurate, since CYC packets are only emitted
78  *          together with other events, such as branches)
79  * @branches: whether to synthesize 'branches' events
80  * @transactions: whether to synthesize events for transactions
81  * @ptwrites: whether to synthesize events for ptwrites
82  * @pwr_events: whether to synthesize power events
83  * @other_events: whether to synthesize other events recorded due to the use of
84  *                aux_output
85  * @intr_events: whether to synthesize interrupt events
86  * @errors: whether to synthesize decoder error events
87  * @dont_decode: whether to skip decoding entirely
88  * @log: write a decoding log
89  * @calls: limit branch samples to calls (can be combined with @returns)
90  * @returns: limit branch samples to returns (can be combined with @calls)
91  * @callchain: add callchain to 'instructions' events
92  * @add_callchain: add callchain to existing event records
93  * @thread_stack: feed branches to the thread_stack
94  * @last_branch: add branch context to 'instructions' events
95  * @add_last_branch: add branch context to existing event records
96  * @approx_ipc: approximate IPC
97  * @flc: whether to synthesize first level cache events
98  * @llc: whether to synthesize last level cache events
99  * @tlb: whether to synthesize TLB events
100  * @remote_access: whether to synthesize remote access events
101  * @mem: whether to synthesize memory events
102  * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
103  * @use_timestamp: use the timestamp trace as kernel time
104  * @vm_time_correlation: perform VM Time Correlation
105  * @vm_tm_corr_dry_run: VM Time Correlation dry-run
106  * @vm_tm_corr_args:  VM Time Correlation implementation-specific arguments
107  * @callchain_sz: maximum callchain size
108  * @last_branch_sz: branch context size
109  * @period: 'instructions' events period
110  * @period_type: 'instructions' events period type
111  * @initial_skip: skip N events at the beginning.
112  * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
113  * @ptime_range: time intervals to trace or NULL
114  * @range_num: number of time intervals to trace
115  * @error_plus_flags: flags to affect what errors are reported
116  * @error_minus_flags: flags to affect what errors are reported
117  * @log_plus_flags: flags to affect what is logged
118  * @log_minus_flags: flags to affect what is logged
119  * @quick: quicker (less detailed) decoding
120  * @log_on_error_size: size of log to keep for outputting log only on errors
121  */
122 struct itrace_synth_opts {
123 	bool			set;
124 	bool			default_no_sample;
125 	bool			inject;
126 	bool			instructions;
127 	bool			cycles;
128 	bool			branches;
129 	bool			transactions;
130 	bool			ptwrites;
131 	bool			pwr_events;
132 	bool			other_events;
133 	bool			intr_events;
134 	bool			errors;
135 	bool			dont_decode;
136 	bool			log;
137 	bool			calls;
138 	bool			returns;
139 	bool			callchain;
140 	bool			add_callchain;
141 	bool			thread_stack;
142 	bool			last_branch;
143 	bool			add_last_branch;
144 	bool			approx_ipc;
145 	bool			flc;
146 	bool			llc;
147 	bool			tlb;
148 	bool			remote_access;
149 	bool			mem;
150 	bool			timeless_decoding;
151 	bool			use_timestamp;
152 	bool			vm_time_correlation;
153 	bool			vm_tm_corr_dry_run;
154 	char			*vm_tm_corr_args;
155 	unsigned int		callchain_sz;
156 	unsigned int		last_branch_sz;
157 	unsigned long long	period;
158 	enum itrace_period_type	period_type;
159 	unsigned long		initial_skip;
160 	unsigned long		*cpu_bitmap;
161 	struct perf_time_interval *ptime_range;
162 	int			range_num;
163 	unsigned int		error_plus_flags;
164 	unsigned int		error_minus_flags;
165 	unsigned int		log_plus_flags;
166 	unsigned int		log_minus_flags;
167 	unsigned int		quick;
168 	unsigned int		log_on_error_size;
169 };
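/*
 * Illustrative sketch (not taken from any particular tool): these options are
 * normally filled in from an --itrace string by itrace_do_parse_synth_opts(),
 * declared below, but can also be set up programmatically, e.g.:
 *
 *	struct itrace_synth_opts opts = { .set = true };
 *
 *	itrace_synth_opts__set_default(&opts, false);
 *	opts.period = 100;
 *	opts.period_type = PERF_ITRACE_PERIOD_NANOSECS;
 *
 * How each option is honoured is up to the individual AUX area decoder.
 */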
170 
171 /**
172  * struct auxtrace_index_entry - indexes an AUX area tracing event within a
173  *                               perf.data file.
174  * @file_offset: offset within the perf.data file
175  * @sz: size of the event
176  */
177 struct auxtrace_index_entry {
178 	u64			file_offset;
179 	u64			sz;
180 };
181 
182 #define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256
183 
184 /**
185  * struct auxtrace_index - index of AUX area tracing events within a perf.data
186  *                         file.
187  * @list: linking a number of arrays of entries
188  * @nr: number of entries
189  * @entries: array of entries
190  */
191 struct auxtrace_index {
192 	struct list_head	list;
193 	size_t			nr;
194 	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
195 };
196 
197 /**
198  * struct auxtrace - session callbacks to allow AUX area data decoding.
199  * @process_event: lets the decoder see all session events
200  * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
201  * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
202  *              processing
203  * @dump_auxtrace_sample: dump AUX area sample data
204  * @flush_events: process any remaining data
205  * @free_events: free resources associated with event processing
206  * @free: free resources associated with the session
207  */
208 struct auxtrace {
209 	int (*process_event)(struct perf_session *session,
210 			     union perf_event *event,
211 			     struct perf_sample *sample,
212 			     const struct perf_tool *tool);
213 	int (*process_auxtrace_event)(struct perf_session *session,
214 				      union perf_event *event,
215 				      const struct perf_tool *tool);
216 	int (*queue_data)(struct perf_session *session,
217 			  struct perf_sample *sample, union perf_event *event,
218 			  u64 data_offset);
219 	void (*dump_auxtrace_sample)(struct perf_session *session,
220 				     struct perf_sample *sample);
221 	int (*flush_events)(struct perf_session *session,
222 			    const struct perf_tool *tool);
223 	void (*free_events)(struct perf_session *session);
224 	void (*free)(struct perf_session *session);
225 	bool (*evsel_is_auxtrace)(struct perf_session *session,
226 				  struct evsel *evsel);
227 };
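/*
 * Illustrative sketch (simplified; 'my_decoder' and the my_*() callbacks are
 * hypothetical): a decoder embeds this callback table in its own state and
 * points the session at it while handling PERF_RECORD_AUXTRACE_INFO, roughly:
 *
 *	struct my_decoder {
 *		struct auxtrace auxtrace;
 *		struct perf_session *session;
 *	};
 *
 *	decoder->auxtrace.process_event = my_process_event;
 *	decoder->auxtrace.process_auxtrace_event = my_process_auxtrace_event;
 *	decoder->auxtrace.flush_events = my_flush_events;
 *	decoder->auxtrace.free_events = my_free_events;
 *	decoder->auxtrace.free = my_free;
 *	session->auxtrace = &decoder->auxtrace;
 *
 * The auxtrace__*() helpers declared below dispatch through these callbacks,
 * and container_of() recovers the decoder state from the struct auxtrace
 * pointer.
 */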
228 
229 /**
230  * struct auxtrace_buffer - a buffer containing AUX area tracing data.
231  * @list: buffers are queued in a list held by struct auxtrace_queue
232  * @size: size of the buffer in bytes
233  * @pid: in per-thread mode, the pid this buffer is associated with
234  * @tid: in per-thread mode, the tid this buffer is associated with
235  * @cpu: in per-cpu mode, the cpu this buffer is associated with
236  * @data: actual buffer data (can be null if the data has not been loaded)
237  * @data_offset: file offset at which the buffer can be read
238  * @mmap_addr: mmap address at which the buffer can be read
239  * @mmap_size: size of the mmap at @mmap_addr
240  * @data_needs_freeing: @data was malloc'd so free it when it is no longer
241  *                      needed
242  * @consecutive: the original data was split up and this buffer is consecutive
243  *               to the previous buffer
244  * @offset: offset as determined by aux_head / aux_tail members of struct
245  *          perf_event_mmap_page
246  * @reference: an implementation-specific reference determined when the data is
247  *             recorded
248  * @buffer_nr: used to number each buffer
249  * @use_size: implementation actually only uses this number of bytes
250  * @use_data: implementation actually only uses data starting at this address
251  */
252 struct auxtrace_buffer {
253 	struct list_head	list;
254 	size_t			size;
255 	pid_t			pid;
256 	pid_t			tid;
257 	struct perf_cpu		cpu;
258 	void			*data;
259 	off_t			data_offset;
260 	void			*mmap_addr;
261 	size_t			mmap_size;
262 	bool			data_needs_freeing;
263 	bool			consecutive;
264 	u64			offset;
265 	u64			reference;
266 	u64			buffer_nr;
267 	size_t			use_size;
268 	void			*use_data;
269 };
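/*
 * Illustrative sketch of how a decoder typically walks a queue's buffers,
 * mapping data on demand (error handling omitted):
 *
 *	struct auxtrace_buffer *buffer = NULL;
 *
 *	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *		void *data = auxtrace_buffer__get_data(buffer, fd);
 *
 *		if (!data)
 *			break;
 *		// decode buffer->use_size (or buffer->size) bytes of data
 *		auxtrace_buffer__put_data(buffer);
 *	}
 *
 * The accessors used here are declared further down in this header.
 */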
270 
271 /**
272  * struct auxtrace_queue - a queue of AUX area tracing data buffers.
273  * @head: head of buffer list
274  * @tid: in per-thread mode, the tid this queue is associated with
275  * @cpu: in per-cpu mode, the cpu this queue is associated with
276  * @set: %true once this queue has been dedicated to a specific thread or cpu
277  * @priv: implementation-specific data
278  */
279 struct auxtrace_queue {
280 	struct list_head	head;
281 	pid_t			tid;
282 	int			cpu;
283 	bool			set;
284 	void			*priv;
285 };
286 
287 /**
288  * struct auxtrace_queues - an array of AUX area tracing queues.
289  * @queue_array: array of queues
290  * @nr_queues: number of queues
291  * @new_data: set whenever new data is queued
292  * @populated: queues have been fully populated using the auxtrace_index
293  * @next_buffer_nr: used to number each buffer
294  */
295 struct auxtrace_queues {
296 	struct auxtrace_queue	*queue_array;
297 	unsigned int		nr_queues;
298 	bool			new_data;
299 	bool			populated;
300 	u64			next_buffer_nr;
301 };
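/*
 * Illustrative sketch ('decoder' is hypothetical state): queues are usually
 * set up when PERF_RECORD_AUXTRACE_INFO is processed and then fed from
 * PERF_RECORD_AUXTRACE events, roughly:
 *
 *	struct auxtrace_buffer *buffer;
 *
 *	err = auxtrace_queues__init(&decoder->queues);
 *	...
 *	err = auxtrace_queues__add_event(&decoder->queues, session, event,
 *					 data_offset, &buffer);
 *	...
 *	auxtrace_queues__free(&decoder->queues);
 */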
302 
303 /**
304  * struct auxtrace_heap_item - element of struct auxtrace_heap.
305  * @queue_nr: queue number
306  * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
307  *           to be a timestamp
308  */
309 struct auxtrace_heap_item {
310 	unsigned int		queue_nr;
311 	u64			ordinal;
312 };
313 
314 /**
315  * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
316  * @heap_array: the heap
317  * @heap_cnt: the number of elements in the heap
318  * @heap_sz: maximum number of elements (grows as needed)
319  */
320 struct auxtrace_heap {
321 	struct auxtrace_heap_item	*heap_array;
322 	unsigned int		heap_cnt;
323 	unsigned int		heap_sz;
324 };
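/*
 * Illustrative sketch: the heap gives timestamp-ordered processing across
 * queues, roughly ('next_timestamp' is whatever the decoder determines):
 *
 *	while (heap->heap_cnt) {
 *		unsigned int queue_nr = heap->heap_array[0].queue_nr;
 *
 *		auxtrace_heap__pop(heap);
 *		// decode queue 'queue_nr' up to the timestamp of the new top
 *		// of the heap, then re-add it keyed by its own next timestamp:
 *		err = auxtrace_heap__add(heap, queue_nr, next_timestamp);
 *	}
 */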
325 
326 /**
327  * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
328  * @base: address of mapped area
329  * @userpg: pointer to buffer's perf_event_mmap_page
330  * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
331  * @len: size of mapped area
332  * @prev: previous aux_head
333  * @idx: index of this mmap
334  * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
335  *       mmap) otherwise %0
336  * @cpu: cpu number for a per-cpu mmap otherwise %-1
337  */
338 struct auxtrace_mmap {
339 	void		*base;
340 	void		*userpg;
341 	size_t		mask;
342 	size_t		len;
343 	u64		prev;
344 	int		idx;
345 	pid_t		tid;
346 	int		cpu;
347 };
348 
349 /**
350  * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
351  * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
352  * @offset: file offset of mapped area
353  * @len: size of mapped area
354  * @prot: mmap memory protection
355  * @idx: index of this mmap
356  * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
357  *       mmap) otherwise %0
358  * @mmap_needed: set to %false for non-auxtrace events. This is needed because
359  *               auxtrace mmapping is done in the same code path as non-auxtrace
360  *               mmapping but not every evsel that needs non-auxtrace mmapping
361  *               also needs auxtrace mmapping.
362  * @cpu: cpu number for a per-cpu mmap otherwise %-1
363  */
364 struct auxtrace_mmap_params {
365 	size_t		mask;
366 	off_t		offset;
367 	size_t		len;
368 	int		prot;
369 	int		idx;
370 	pid_t		tid;
371 	bool		mmap_needed;
372 	struct perf_cpu	cpu;
373 };
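/*
 * Illustrative sketch of the set-up sequence used by the evlist mmap code
 * (simplified; see the declarations below):
 *
 *	struct auxtrace_mmap_params mp;
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   auxtrace_overwrite);
 *	auxtrace_mmap_params__set_idx(&mp, evlist, evsel, idx);
 *	if (auxtrace_mmap__mmap(&mm, &mp, userpg, fd))
 *		return -1;
 */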
374 
375 /**
376  * struct auxtrace_record - callbacks for recording AUX area data.
377  * @recording_options: validate and process recording options
378  * @info_priv_size: return the size of the private data in auxtrace_info_event
379  * @info_fill: fill-in the private data in auxtrace_info_event
380  * @free: free this auxtrace record structure
381  * @snapshot_start: starting a snapshot
382  * @snapshot_finish: finishing a snapshot
383  * @find_snapshot: find data to snapshot within auxtrace mmap
384  * @parse_snapshot_options: parse snapshot options
385  * @reference: provide a 64-bit reference number for auxtrace_event
386  * @read_finish: called after reading from an auxtrace mmap
387  * @alignment: alignment (if any) for AUX area data
388  * @default_aux_sample_size: default sample size for the --aux-sample option
390  * @evlist: selected events list
391  */
392 struct auxtrace_record {
393 	int (*recording_options)(struct auxtrace_record *itr,
394 				 struct evlist *evlist,
395 				 struct record_opts *opts);
396 	size_t (*info_priv_size)(struct auxtrace_record *itr,
397 				 struct evlist *evlist);
398 	int (*info_fill)(struct auxtrace_record *itr,
399 			 struct perf_session *session,
400 			 struct perf_record_auxtrace_info *auxtrace_info,
401 			 size_t priv_size);
402 	void (*free)(struct auxtrace_record *itr);
403 	int (*snapshot_start)(struct auxtrace_record *itr);
404 	int (*snapshot_finish)(struct auxtrace_record *itr);
405 	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
406 			     struct auxtrace_mmap *mm, unsigned char *data,
407 			     u64 *head, u64 *old);
408 	int (*parse_snapshot_options)(struct auxtrace_record *itr,
409 				      struct record_opts *opts,
410 				      const char *str);
411 	u64 (*reference)(struct auxtrace_record *itr);
412 	int (*read_finish)(struct auxtrace_record *itr, int idx);
413 	unsigned int alignment;
414 	unsigned int default_aux_sample_size;
415 	struct evlist *evlist;
416 };
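/*
 * Illustrative sketch: 'perf record' obtains the architecture's callbacks via
 * auxtrace_record__init() and drives recording through the wrappers declared
 * below, roughly:
 *
 *	struct auxtrace_record *itr = auxtrace_record__init(evlist, &err);
 *
 *	if (err)
 *		return err;
 *	if (itr) {
 *		err = auxtrace_record__options(itr, evlist, opts);
 *		...
 *		auxtrace_record__free(itr);
 *	}
 */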
417 
418 /**
419  * struct addr_filter - address filter.
420  * @list: list node
421  * @range: true if it is a range filter
422  * @start: true if action is 'filter' or 'start'
423  * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
424  *          to 'stop')
425  * @sym_from: symbol name for the filter address
426  * @sym_to: symbol name that determines the filter size
427  * @sym_from_idx: selects n'th from symbols with the same name (0 means global
428  *                and less than 0 means symbol must be unique)
429  * @sym_to_idx: same as @sym_from_idx but for @sym_to
430  * @addr: filter address
431  * @size: filter region size (for range filters)
432  * @filename: DSO file name or NULL for the kernel
433  * @str: allocated string that contains the other string members
434  */
435 struct addr_filter {
436 	struct list_head	list;
437 	bool			range;
438 	bool			start;
439 	const char		*action;
440 	const char		*sym_from;
441 	const char		*sym_to;
442 	int			sym_from_idx;
443 	int			sym_to_idx;
444 	u64			addr;
445 	u64			size;
446 	const char		*filename;
447 	char			*str;
448 };
449 
450 /**
451  * struct addr_filters - list of address filters.
452  * @head: list of address filters
453  * @cnt: number of address filters
454  */
455 struct addr_filters {
456 	struct list_head	head;
457 	int			cnt;
458 };
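/*
 * Illustrative sketch: bare filter strings, in the same format accepted by
 * 'perf record --filter' for AUX area events, can be parsed like this
 * (error handling omitted):
 *
 *	struct addr_filters filts;
 *	struct addr_filter *filt;
 *
 *	addr_filters__init(&filts);
 *	addr_filters__parse_bare_filter(&filts, "filter main @ /bin/ls");
 *	list_for_each_entry(filt, &filts.head, list)
 *		pr_debug("%s %s\n", filt->action, filt->sym_from);
 *	addr_filters__exit(&filts);
 */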
459 
460 struct auxtrace_cache;
461 
462 #ifdef HAVE_AUXTRACE_SUPPORT
463 
464 u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm);
465 int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail);
466 
467 static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm,
468 					   int kernel_is_64_bit __maybe_unused)
469 {
470 	struct perf_event_mmap_page *pc = mm->userpg;
471 	u64 head;
472 
473 #if BITS_PER_LONG == 32
474 	if (kernel_is_64_bit)
475 		return compat_auxtrace_mmap__read_head(mm);
476 #endif
477 	head = READ_ONCE(pc->aux_head);
478 
479 	/* Ensure all reads are done after we read the head */
480 	smp_rmb();
481 	return head;
482 }
483 
484 static inline int auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail,
485 					    int kernel_is_64_bit __maybe_unused)
486 {
487 	struct perf_event_mmap_page *pc = mm->userpg;
488 
489 #if BITS_PER_LONG == 32
490 	if (kernel_is_64_bit)
491 		return compat_auxtrace_mmap__write_tail(mm, tail);
492 #endif
493 	/* Ensure all reads are done before we write the tail out */
494 	smp_mb();
495 	WRITE_ONCE(pc->aux_tail, tail);
496 	return 0;
497 }
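/*
 * Illustrative sketch: together these helpers implement the usual AUX area
 * ring-buffer handshake with the kernel, roughly:
 *
 *	u64 head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);
 *	u64 old = mm->prev;
 *
 *	if (head != old) {
 *		// copy or decode the data between 'old' and 'head'
 *		mm->prev = head;
 *		auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
 *	}
 *
 * auxtrace_mmap__read() below wraps this pattern for the common case.
 */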
498 
499 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
500 			struct auxtrace_mmap_params *mp,
501 			void *userpg, int fd);
502 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
503 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
504 				off_t auxtrace_offset,
505 				unsigned int auxtrace_pages,
506 				bool auxtrace_overwrite);
507 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
508 				   struct evlist *evlist,
509 				   struct evsel *evsel, int idx);
510 
511 typedef int (*process_auxtrace_t)(const struct perf_tool *tool,
512 				  struct mmap *map,
513 				  union perf_event *event, void *data1,
514 				  size_t len1, void *data2, size_t len2);
515 
516 int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
517 			struct perf_env *env, const struct perf_tool *tool,
518 			process_auxtrace_t fn);
519 
520 int auxtrace_mmap__read_snapshot(struct mmap *map,
521 				 struct auxtrace_record *itr, struct perf_env *env,
522 				 const struct perf_tool *tool, process_auxtrace_t fn,
523 				 size_t snapshot_size);
524 
525 int auxtrace_queues__init_nr(struct auxtrace_queues *queues, int nr_queues);
526 int auxtrace_queues__init(struct auxtrace_queues *queues);
527 int auxtrace_queues__add_event(struct auxtrace_queues *queues,
528 			       struct perf_session *session,
529 			       union perf_event *event, off_t data_offset,
530 			       struct auxtrace_buffer **buffer_ptr);
531 struct auxtrace_queue *
532 auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
533 			      struct perf_sample *sample,
534 			      struct perf_session *session);
535 int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
536 				struct perf_session *session,
537 				struct perf_sample *sample, u64 data_offset,
538 				u64 reference);
539 void auxtrace_queues__free(struct auxtrace_queues *queues);
540 int auxtrace_queues__process_index(struct auxtrace_queues *queues,
541 				   struct perf_session *session);
542 int auxtrace_queue_data(struct perf_session *session, bool samples,
543 			bool events);
544 struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
545 					      struct auxtrace_buffer *buffer);
546 void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);
547 static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
548 {
549 	return auxtrace_buffer__get_data_rw(buffer, fd, false);
550 }
551 void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
552 void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
553 void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
554 
555 int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
556 		       u64 ordinal);
557 void auxtrace_heap__pop(struct auxtrace_heap *heap);
558 void auxtrace_heap__free(struct auxtrace_heap *heap);
559 
560 struct auxtrace_cache_entry {
561 	struct hlist_node hash;
562 	u32 key;
563 };
564 
565 struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
566 					   unsigned int limit_percent);
567 void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
568 void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
569 void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
570 int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
571 			struct auxtrace_cache_entry *entry);
572 void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
573 void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
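/*
 * Illustrative sketch ('my_cached_insn' is a hypothetical cached object):
 * users embed struct auxtrace_cache_entry at the start of the object they
 * want to cache, e.g.:
 *
 *	struct my_cached_insn {
 *		struct auxtrace_cache_entry entry;
 *		u32 insn_len;
 *	};
 *
 *	struct my_cached_insn *e = auxtrace_cache__alloc_entry(c);
 *
 *	if (e) {
 *		e->insn_len = len;
 *		auxtrace_cache__add(c, key, &e->entry);
 *	}
 *	...
 *	e = auxtrace_cache__lookup(c, key);
 */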
574 
575 struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
576 					      int *err);
577 
578 int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
579 				    struct record_opts *opts,
580 				    const char *str);
581 int auxtrace_parse_sample_options(struct auxtrace_record *itr,
582 				  struct evlist *evlist,
583 				  struct record_opts *opts, const char *str);
584 int auxtrace_parse_aux_action(struct evlist *evlist);
585 int auxtrace_record__options(struct auxtrace_record *itr,
586 			     struct evlist *evlist,
587 			     struct record_opts *opts);
588 size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
589 				       struct evlist *evlist);
590 int auxtrace_record__info_fill(struct auxtrace_record *itr,
591 			       struct perf_session *session,
592 			       struct perf_record_auxtrace_info *auxtrace_info,
593 			       size_t priv_size);
594 void auxtrace_record__free(struct auxtrace_record *itr);
595 int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
596 int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
597 int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
598 				   struct auxtrace_mmap *mm,
599 				   unsigned char *data, u64 *head, u64 *old);
600 u64 auxtrace_record__reference(struct auxtrace_record *itr);
601 int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);
602 
603 int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
604 				   off_t file_offset);
605 int auxtrace_index__write(int fd, struct list_head *head);
606 int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
607 			    bool needs_swap);
608 void auxtrace_index__free(struct list_head *head);
609 
610 void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
611 				int code, int cpu, pid_t pid, pid_t tid, u64 ip,
612 				const char *msg, u64 timestamp,
613 				pid_t machine_pid, int vcpu);
614 void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
615 			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
616 			  const char *msg, u64 timestamp);
617 
618 int perf_event__process_auxtrace_info(struct perf_session *session,
619 				      union perf_event *event);
620 s64 perf_event__process_auxtrace(struct perf_session *session,
621 				 union perf_event *event);
622 int perf_event__process_auxtrace_error(struct perf_session *session,
623 				       union perf_event *event);
624 int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
625 			       const char *str, int unset);
626 int itrace_parse_synth_opts(const struct option *opt, const char *str,
627 			    int unset);
628 void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
629 				    bool no_sample);
630 
631 size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
632 void perf_session__auxtrace_error_inc(struct perf_session *session,
633 				      union perf_event *event);
634 void events_stats__auxtrace_error_warn(const struct events_stats *stats);
635 
636 void addr_filters__init(struct addr_filters *filts);
637 void addr_filters__exit(struct addr_filters *filts);
638 int addr_filters__parse_bare_filter(struct addr_filters *filts,
639 				    const char *filter);
640 int auxtrace_parse_filters(struct evlist *evlist);
641 
642 int auxtrace__process_event(struct perf_session *session, union perf_event *event,
643 			    struct perf_sample *sample, const struct perf_tool *tool);
644 void auxtrace__dump_auxtrace_sample(struct perf_session *session,
645 				    struct perf_sample *sample);
646 int auxtrace__flush_events(struct perf_session *session, const struct perf_tool *tool);
647 void auxtrace__free_events(struct perf_session *session);
648 void auxtrace__free(struct perf_session *session);
649 bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
650 				 struct evsel *evsel);
651 
652 #define ITRACE_HELP \
653 "				i[period]:    		synthesize instructions events\n" \
654 "				y[period]:    		synthesize cycles events (same period as i)\n" \
655 "				b:	    		synthesize branches events\n" \
656 "				c:	    		synthesize branches events (calls only)\n"	\
657 "				r:	    		synthesize branches events (returns only)\n" \
658 "				x:	    		synthesize transactions events\n"		\
659 "				w:	    		synthesize ptwrite events\n"		\
660 "				p:	    		synthesize power events\n"			\
661 "				o:			synthesize other events recorded due to the use\n" \
662 "							of aux-output (refer to perf record)\n"	\
663 "				I:			synthesize interrupt or similar (asynchronous) events\n" \
664 "							(e.g. Intel PT Event Trace)\n" \
665 "				e[flags]:		synthesize error events\n" \
666 "							each flag must be preceded by + or -\n" \
667 "							error flags are: o (overflow)\n" \
668 "									 l (data lost)\n" \
669 "				d[flags]:		create a debug log\n" \
670 "							each flag must be preceded by + or -\n" \
671 "							log flags are: a (all perf events)\n" \
672 "							               o (output to stdout)\n" \
673 "				f:	    		synthesize first level cache events\n" \
674 "				m:	    		synthesize last level cache events\n" \
675 "				t:	    		synthesize TLB events\n" \
676 "				a:	    		synthesize remote access events\n" \
677 "				g[len]:     		synthesize a call chain (use with i or x)\n" \
678 "				G[len]:			synthesize a call chain on existing event records\n" \
679 "				l[len]:     		synthesize last branch entries (use with i or x)\n" \
680 "				L[len]:			synthesize last branch entries on existing event records\n" \
681 "				sNUMBER:    		skip initial number of events\n"		\
682 "				q:			quicker (less detailed) decoding\n" \
683 "				A:			approximate IPC\n" \
684 "				Z:			prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \
685 "				T:			use the timestamp trace as kernel time\n" \
686 "				PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
687 "				concatenate multiple options. Default is iybxwpe or cewp\n"
688 
689 static inline
690 void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
691 				       struct perf_time_interval *ptime_range,
692 				       int range_num)
693 {
694 	opts->ptime_range = ptime_range;
695 	opts->range_num = range_num;
696 }
697 
698 static inline
699 void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
700 {
701 	opts->ptime_range = NULL;
702 	opts->range_num = 0;
703 }
704 
705 #else
706 #include "debug.h"
707 
708 static inline struct auxtrace_record *
709 auxtrace_record__init(struct evlist *evlist __maybe_unused,
710 		      int *err)
711 {
712 	*err = 0;
713 	return NULL;
714 }
715 
716 static inline
717 void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
718 {
719 }
720 
721 static inline
722 int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
723 			     struct evlist *evlist __maybe_unused,
724 			     struct record_opts *opts __maybe_unused)
725 {
726 	return 0;
727 }
728 
729 static inline
730 int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
731 				      union perf_event *event __maybe_unused)
732 {
733 	return 0;
734 }
735 
736 static inline
737 s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
738 				 union perf_event *event __maybe_unused)
739 {
740 	return 0;
741 }
742 
743 static inline
744 int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
745 				       union perf_event *event __maybe_unused)
746 {
747 	return 0;
748 }
749 
750 static inline
751 void perf_session__auxtrace_error_inc(struct perf_session *session
752 				      __maybe_unused,
753 				      union perf_event *event
754 				      __maybe_unused)
755 {
756 }
757 
758 static inline
759 void events_stats__auxtrace_error_warn(const struct events_stats *stats
760 				       __maybe_unused)
761 {
762 }
763 
764 static inline
765 int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
766 			       const char *str __maybe_unused, int unset __maybe_unused)
767 {
768 	pr_err("AUX area tracing not supported\n");
769 	return -EINVAL;
770 }
771 
772 static inline
773 int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
774 			    const char *str __maybe_unused,
775 			    int unset __maybe_unused)
776 {
777 	pr_err("AUX area tracing not supported\n");
778 	return -EINVAL;
779 }
780 
781 static inline
782 int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
783 				    struct record_opts *opts __maybe_unused,
784 				    const char *str)
785 {
786 	if (!str)
787 		return 0;
788 	pr_err("AUX area tracing not supported\n");
789 	return -EINVAL;
790 }
791 
792 static inline
793 int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
794 				  struct evlist *evlist __maybe_unused,
795 				  struct record_opts *opts __maybe_unused,
796 				  const char *str)
797 {
798 	if (!str)
799 		return 0;
800 	pr_err("AUX area tracing not supported\n");
801 	return -EINVAL;
802 }
803 
804 static inline
805 int auxtrace_parse_aux_action(struct evlist *evlist __maybe_unused)
806 {
807 	pr_err("AUX area tracing not supported\n");
808 	return -EINVAL;
809 }
810 
811 static inline
812 int auxtrace__process_event(struct perf_session *session __maybe_unused,
813 			    union perf_event *event __maybe_unused,
814 			    struct perf_sample *sample __maybe_unused,
815 			    const struct perf_tool *tool __maybe_unused)
816 {
817 	return 0;
818 }
819 
820 static inline
821 void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
822 				    struct perf_sample *sample __maybe_unused)
823 {
824 }
825 
826 static inline
827 int auxtrace__flush_events(struct perf_session *session __maybe_unused,
828 			   const struct perf_tool *tool __maybe_unused)
829 {
830 	return 0;
831 }
832 
833 static inline
834 void auxtrace__free_events(struct perf_session *session __maybe_unused)
835 {
836 }
837 
838 static inline
839 void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
840 {
841 }
842 
843 static inline
844 void auxtrace__free(struct perf_session *session __maybe_unused)
845 {
846 }
847 
848 static inline
849 int auxtrace_index__write(int fd __maybe_unused,
850 			  struct list_head *head __maybe_unused)
851 {
852 	return -EINVAL;
853 }
854 
855 static inline
856 int auxtrace_index__process(int fd __maybe_unused,
857 			    u64 size __maybe_unused,
858 			    struct perf_session *session __maybe_unused,
859 			    bool needs_swap __maybe_unused)
860 {
861 	return -EINVAL;
862 }
863 
864 static inline
865 void auxtrace_index__free(struct list_head *head __maybe_unused)
866 {
867 }
868 
869 static inline
870 bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
871 				 struct evsel *evsel __maybe_unused)
872 {
873 	return false;
874 }
875 
876 static inline
877 int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
878 {
879 	return 0;
880 }
881 
882 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
883 			struct auxtrace_mmap_params *mp,
884 			void *userpg, int fd);
885 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
886 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
887 				off_t auxtrace_offset,
888 				unsigned int auxtrace_pages,
889 				bool auxtrace_overwrite);
890 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
891 				   struct evlist *evlist,
892 				   struct evsel *evsel, int idx);
893 
894 #define ITRACE_HELP ""
895 
896 static inline
897 void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
898 				       __maybe_unused,
899 				       struct perf_time_interval *ptime_range
900 				       __maybe_unused,
901 				       int range_num __maybe_unused)
902 {
903 }
904 
905 static inline
906 void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
907 					 __maybe_unused)
908 {
909 }
910 
911 #endif
912 
913 #endif
914