xref: /linux/tools/perf/util/auxtrace.h (revision 2975489458c59ce2e348b1b3aef5d8d2acb5cc8d)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * auxtrace.h: AUX area trace support
4  * Copyright (c) 2013-2015, Intel Corporation.
5  */
6 
7 #ifndef __PERF_AUXTRACE_H
8 #define __PERF_AUXTRACE_H
9 
10 #include <sys/types.h>
11 #include <errno.h>
12 #include <stdbool.h>
13 #include <stddef.h>
14 #include <stdio.h> // FILE
15 #include <linux/list.h>
16 #include <linux/perf_event.h>
17 #include <linux/types.h>
18 #include <asm/bitsperlong.h>
19 #include <asm/barrier.h>
20 
21 union perf_event;
22 struct perf_session;
23 struct evlist;
24 struct perf_tool;
25 struct mmap;
26 struct perf_sample;
27 struct option;
28 struct record_opts;
29 struct perf_record_auxtrace_error;
30 struct perf_record_auxtrace_info;
31 struct events_stats;
32 
/* Error types carried by PERF_RECORD_AUXTRACE_ERROR events */
enum auxtrace_error_type {
       PERF_AUXTRACE_ERROR_ITRACE  = 1,	/* instruction-trace decoder error */
       PERF_AUXTRACE_ERROR_MAX			/* number of error types + 1 */
};
37 
38 /* Auxtrace records must have the same alignment as perf event records */
39 #define PERF_AUXTRACE_RECORD_ALIGNMENT 8
40 
/* AUX area trace implementation types (identifies which decoder to use) */
enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,		/* Intel Processor Trace */
	PERF_AUXTRACE_INTEL_BTS,	/* Intel Branch Trace Store */
	PERF_AUXTRACE_CS_ETM,		/* ARM CoreSight ETM */
	PERF_AUXTRACE_ARM_SPE,		/* ARM Statistical Profiling Extension */
	PERF_AUXTRACE_S390_CPUMSF,	/* s390 CPU Measurement Sampling Facility */
};
49 
/* Unit in which the itrace 'instructions' events period is expressed */
enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,	/* period counted in instructions */
	PERF_ITRACE_PERIOD_TICKS,		/* period counted in timestamp ticks */
	PERF_ITRACE_PERIOD_NANOSECS,		/* period counted in nanoseconds */
};
55 
/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instructions' events
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 */
struct itrace_synth_opts {
	bool			set;
	bool			default_no_sample;
	bool			inject;
	bool			instructions;
	bool			branches;
	bool			transactions;
	bool			ptwrites;
	bool			pwr_events;
	bool			other_events;
	bool			errors;
	bool			dont_decode;
	bool			log;
	bool			calls;
	bool			returns;
	bool			callchain;
	bool			thread_stack;
	bool			last_branch;
	unsigned int		callchain_sz;
	unsigned int		last_branch_sz;
	unsigned long long	period;
	enum itrace_period_type	period_type;
	unsigned long		initial_skip;
	unsigned long		*cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int			range_num;
};
113 
/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64			file_offset;
	u64			sz;
};
124 
125 #define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256
126 
127 /**
128  * struct auxtrace_index - index of AUX area tracing events within a perf.data
129  *                         file.
130  * @list: linking a number of arrays of entries
131  * @nr: number of entries
132  * @entries: array of entries
133  */
134 struct auxtrace_index {
135 	struct list_head	list;
136 	size_t			nr;
137 	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
138 };
139 
140 /**
141  * struct auxtrace - session callbacks to allow AUX area data decoding.
142  * @process_event: lets the decoder see all session events
143  * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
144  * @flush_events: process any remaining data
145  * @free_events: free resources associated with event processing
146  * @free: free resources associated with the session
147  */
148 struct auxtrace {
149 	int (*process_event)(struct perf_session *session,
150 			     union perf_event *event,
151 			     struct perf_sample *sample,
152 			     struct perf_tool *tool);
153 	int (*process_auxtrace_event)(struct perf_session *session,
154 				      union perf_event *event,
155 				      struct perf_tool *tool);
156 	int (*flush_events)(struct perf_session *session,
157 			    struct perf_tool *tool);
158 	void (*free_events)(struct perf_session *session);
159 	void (*free)(struct perf_session *session);
160 };
161 
162 /**
163  * struct auxtrace_buffer - a buffer containing AUX area tracing data.
164  * @list: buffers are queued in a list held by struct auxtrace_queue
165  * @size: size of the buffer in bytes
166  * @pid: in per-thread mode, the pid this buffer is associated with
167  * @tid: in per-thread mode, the tid this buffer is associated with
168  * @cpu: in per-cpu mode, the cpu this buffer is associated with
169  * @data: actual buffer data (can be null if the data has not been loaded)
170  * @data_offset: file offset at which the buffer can be read
171  * @mmap_addr: mmap address at which the buffer can be read
172  * @mmap_size: size of the mmap at @mmap_addr
173  * @data_needs_freeing: @data was malloc'd so free it when it is no longer
174  *                      needed
175  * @consecutive: the original data was split up and this buffer is consecutive
176  *               to the previous buffer
177  * @offset: offset as determined by aux_head / aux_tail members of struct
178  *          perf_event_mmap_page
179  * @reference: an implementation-specific reference determined when the data is
180  *             recorded
181  * @buffer_nr: used to number each buffer
182  * @use_size: implementation actually only uses this number of bytes
183  * @use_data: implementation actually only uses data starting at this address
184  */
185 struct auxtrace_buffer {
186 	struct list_head	list;
187 	size_t			size;
188 	pid_t			pid;
189 	pid_t			tid;
190 	int			cpu;
191 	void			*data;
192 	off_t			data_offset;
193 	void			*mmap_addr;
194 	size_t			mmap_size;
195 	bool			data_needs_freeing;
196 	bool			consecutive;
197 	u64			offset;
198 	u64			reference;
199 	u64			buffer_nr;
200 	size_t			use_size;
201 	void			*use_data;
202 };
203 
204 /**
205  * struct auxtrace_queue - a queue of AUX area tracing data buffers.
206  * @head: head of buffer list
207  * @tid: in per-thread mode, the tid this queue is associated with
208  * @cpu: in per-cpu mode, the cpu this queue is associated with
209  * @set: %true once this queue has been dedicated to a specific thread or cpu
210  * @priv: implementation-specific data
211  */
212 struct auxtrace_queue {
213 	struct list_head	head;
214 	pid_t			tid;
215 	int			cpu;
216 	bool			set;
217 	void			*priv;
218 };
219 
220 /**
221  * struct auxtrace_queues - an array of AUX area tracing queues.
222  * @queue_array: array of queues
223  * @nr_queues: number of queues
224  * @new_data: set whenever new data is queued
225  * @populated: queues have been fully populated using the auxtrace_index
226  * @next_buffer_nr: used to number each buffer
227  */
228 struct auxtrace_queues {
229 	struct auxtrace_queue	*queue_array;
230 	unsigned int		nr_queues;
231 	bool			new_data;
232 	bool			populated;
233 	u64			next_buffer_nr;
234 };
235 
236 /**
237  * struct auxtrace_heap_item - element of struct auxtrace_heap.
238  * @queue_nr: queue number
239  * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
240  *           to be a timestamp
241  */
242 struct auxtrace_heap_item {
243 	unsigned int		queue_nr;
244 	u64			ordinal;
245 };
246 
247 /**
248  * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
249  * @heap_array: the heap
250  * @heap_cnt: the number of elements in the heap
251  * @heap_sz: maximum number of elements (grows as needed)
252  */
253 struct auxtrace_heap {
254 	struct auxtrace_heap_item	*heap_array;
255 	unsigned int		heap_cnt;
256 	unsigned int		heap_sz;
257 };
258 
259 /**
260  * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
261  * @base: address of mapped area
262  * @userpg: pointer to buffer's perf_event_mmap_page
263  * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
264  * @len: size of mapped area
265  * @prev: previous aux_head
266  * @idx: index of this mmap
267  * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
268  *       mmap) otherwise %0
269  * @cpu: cpu number for a per-cpu mmap otherwise %-1
270  */
271 struct auxtrace_mmap {
272 	void		*base;
273 	void		*userpg;
274 	size_t		mask;
275 	size_t		len;
276 	u64		prev;
277 	int		idx;
278 	pid_t		tid;
279 	int		cpu;
280 };
281 
282 /**
283  * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
284  * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
285  * @offset: file offset of mapped area
286  * @len: size of mapped area
287  * @prot: mmap memory protection
288  * @idx: index of this mmap
289  * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
290  *       mmap) otherwise %0
291  * @cpu: cpu number for a per-cpu mmap otherwise %-1
292  */
293 struct auxtrace_mmap_params {
294 	size_t		mask;
295 	off_t		offset;
296 	size_t		len;
297 	int		prot;
298 	int		idx;
299 	pid_t		tid;
300 	int		cpu;
301 };
302 
303 /**
304  * struct auxtrace_record - callbacks for recording AUX area data.
305  * @recording_options: validate and process recording options
306  * @info_priv_size: return the size of the private data in auxtrace_info_event
307  * @info_fill: fill-in the private data in auxtrace_info_event
308  * @free: free this auxtrace record structure
309  * @snapshot_start: starting a snapshot
310  * @snapshot_finish: finishing a snapshot
311  * @find_snapshot: find data to snapshot within auxtrace mmap
312  * @parse_snapshot_options: parse snapshot options
313  * @reference: provide a 64-bit reference number for auxtrace_event
314  * @read_finish: called after reading from an auxtrace mmap
315  * @alignment: alignment (if any) for AUX area data
316  */
317 struct auxtrace_record {
318 	int (*recording_options)(struct auxtrace_record *itr,
319 				 struct evlist *evlist,
320 				 struct record_opts *opts);
321 	size_t (*info_priv_size)(struct auxtrace_record *itr,
322 				 struct evlist *evlist);
323 	int (*info_fill)(struct auxtrace_record *itr,
324 			 struct perf_session *session,
325 			 struct perf_record_auxtrace_info *auxtrace_info,
326 			 size_t priv_size);
327 	void (*free)(struct auxtrace_record *itr);
328 	int (*snapshot_start)(struct auxtrace_record *itr);
329 	int (*snapshot_finish)(struct auxtrace_record *itr);
330 	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
331 			     struct auxtrace_mmap *mm, unsigned char *data,
332 			     u64 *head, u64 *old);
333 	int (*parse_snapshot_options)(struct auxtrace_record *itr,
334 				      struct record_opts *opts,
335 				      const char *str);
336 	u64 (*reference)(struct auxtrace_record *itr);
337 	int (*read_finish)(struct auxtrace_record *itr, int idx);
338 	unsigned int alignment;
339 };
340 
341 /**
342  * struct addr_filter - address filter.
343  * @list: list node
344  * @range: true if it is a range filter
345  * @start: true if action is 'filter' or 'start'
346  * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
347  *          to 'stop')
348  * @sym_from: symbol name for the filter address
349  * @sym_to: symbol name that determines the filter size
350  * @sym_from_idx: selects n'th from symbols with the same name (0 means global
351  *                and less than 0 means symbol must be unique)
352  * @sym_to_idx: same as @sym_from_idx but for @sym_to
353  * @addr: filter address
354  * @size: filter region size (for range filters)
355  * @filename: DSO file name or NULL for the kernel
356  * @str: allocated string that contains the other string members
357  */
358 struct addr_filter {
359 	struct list_head	list;
360 	bool			range;
361 	bool			start;
362 	const char		*action;
363 	const char		*sym_from;
364 	const char		*sym_to;
365 	int			sym_from_idx;
366 	int			sym_to_idx;
367 	u64			addr;
368 	u64			size;
369 	const char		*filename;
370 	char			*str;
371 };
372 
373 /**
374  * struct addr_filters - list of address filters.
375  * @head: list of address filters
376  * @cnt: number of address filters
377  */
378 struct addr_filters {
379 	struct list_head	head;
380 	int			cnt;
381 };
382 
383 struct auxtrace_cache;
384 
385 #ifdef HAVE_AUXTRACE_SUPPORT
386 
/*
 * In snapshot mode the mmapped page is read-only which makes using
 * __sync_val_compare_and_swap() problematic.  However, snapshot mode expects
 * the buffer is not updated while the snapshot is made (e.g. Intel PT disables
 * the event) so there is not a race anyway.
 *
 * Return the current aux_head using a plain READ_ONCE() load; this is safe
 * only under the snapshot-mode assumption described above.
 */
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}
402 
/* Read the kernel-updated aux_head, i.e. how far data has been written. */
static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 head = READ_ONCE(pc->aux_head);
#else
	/*
	 * On 32-bit, a plain load of the 64-bit aux_head may not be atomic,
	 * so use a compare-and-swap with old == new == 0: it always returns
	 * the current value and only ever replaces 0 with 0.
	 */
	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}
416 
/*
 * Publish the new aux_tail, i.e. tell the kernel how far data has been
 * consumed so the buffer space can be reused.
 */
static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 old_tail;
#endif

	/* Ensure all reads are done before we write the tail out */
	mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pc->aux_tail = tail;
#else
	/*
	 * On 32-bit, a plain store of the 64-bit aux_tail may not be atomic:
	 * read the current value with CAS(0, 0), then CAS the new tail in,
	 * retrying until no concurrent update intervenes.
	 */
	do {
		old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
	} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}
434 
435 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
436 			struct auxtrace_mmap_params *mp,
437 			void *userpg, int fd);
438 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
439 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
440 				off_t auxtrace_offset,
441 				unsigned int auxtrace_pages,
442 				bool auxtrace_overwrite);
443 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
444 				   struct evlist *evlist, int idx,
445 				   bool per_cpu);
446 
447 typedef int (*process_auxtrace_t)(struct perf_tool *tool,
448 				  struct mmap *map,
449 				  union perf_event *event, void *data1,
450 				  size_t len1, void *data2, size_t len2);
451 
452 int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
453 			struct perf_tool *tool, process_auxtrace_t fn);
454 
455 int auxtrace_mmap__read_snapshot(struct mmap *map,
456 				 struct auxtrace_record *itr,
457 				 struct perf_tool *tool, process_auxtrace_t fn,
458 				 size_t snapshot_size);
459 
460 int auxtrace_queues__init(struct auxtrace_queues *queues);
461 int auxtrace_queues__add_event(struct auxtrace_queues *queues,
462 			       struct perf_session *session,
463 			       union perf_event *event, off_t data_offset,
464 			       struct auxtrace_buffer **buffer_ptr);
465 void auxtrace_queues__free(struct auxtrace_queues *queues);
466 int auxtrace_queues__process_index(struct auxtrace_queues *queues,
467 				   struct perf_session *session);
468 struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
469 					      struct auxtrace_buffer *buffer);
470 void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
471 void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
472 void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
473 void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
474 
475 int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
476 		       u64 ordinal);
477 void auxtrace_heap__pop(struct auxtrace_heap *heap);
478 void auxtrace_heap__free(struct auxtrace_heap *heap);
479 
480 struct auxtrace_cache_entry {
481 	struct hlist_node hash;
482 	u32 key;
483 };
484 
485 struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
486 					   unsigned int limit_percent);
487 void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
488 void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
489 void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
490 int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
491 			struct auxtrace_cache_entry *entry);
492 void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
493 void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
494 
495 struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
496 					      int *err);
497 
498 int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
499 				    struct record_opts *opts,
500 				    const char *str);
501 int auxtrace_record__options(struct auxtrace_record *itr,
502 			     struct evlist *evlist,
503 			     struct record_opts *opts);
504 size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
505 				       struct evlist *evlist);
506 int auxtrace_record__info_fill(struct auxtrace_record *itr,
507 			       struct perf_session *session,
508 			       struct perf_record_auxtrace_info *auxtrace_info,
509 			       size_t priv_size);
510 void auxtrace_record__free(struct auxtrace_record *itr);
511 int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
512 int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
513 int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
514 				   struct auxtrace_mmap *mm,
515 				   unsigned char *data, u64 *head, u64 *old);
516 u64 auxtrace_record__reference(struct auxtrace_record *itr);
517 
518 int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
519 				   off_t file_offset);
520 int auxtrace_index__write(int fd, struct list_head *head);
521 int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
522 			    bool needs_swap);
523 void auxtrace_index__free(struct list_head *head);
524 
525 void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
526 			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
527 			  const char *msg, u64 timestamp);
528 
529 int perf_event__process_auxtrace_info(struct perf_session *session,
530 				      union perf_event *event);
531 s64 perf_event__process_auxtrace(struct perf_session *session,
532 				 union perf_event *event);
533 int perf_event__process_auxtrace_error(struct perf_session *session,
534 				       union perf_event *event);
535 int itrace_parse_synth_opts(const struct option *opt, const char *str,
536 			    int unset);
537 void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
538 				    bool no_sample);
539 
540 size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
541 void perf_session__auxtrace_error_inc(struct perf_session *session,
542 				      union perf_event *event);
543 void events_stats__auxtrace_error_warn(const struct events_stats *stats);
544 
545 void addr_filters__init(struct addr_filters *filts);
546 void addr_filters__exit(struct addr_filters *filts);
547 int addr_filters__parse_bare_filter(struct addr_filters *filts,
548 				    const char *filter);
549 int auxtrace_parse_filters(struct evlist *evlist);
550 
551 int auxtrace__process_event(struct perf_session *session, union perf_event *event,
552 			    struct perf_sample *sample, struct perf_tool *tool);
553 int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
554 void auxtrace__free_events(struct perf_session *session);
555 void auxtrace__free(struct perf_session *session);
556 
557 #define ITRACE_HELP \
558 "				i:	    		synthesize instructions events\n"		\
559 "				b:	    		synthesize branches events\n"		\
560 "				c:	    		synthesize branches events (calls only)\n"	\
561 "				r:	    		synthesize branches events (returns only)\n" \
562 "				x:	    		synthesize transactions events\n"		\
563 "				w:	    		synthesize ptwrite events\n"		\
564 "				p:	    		synthesize power events\n"			\
565 "				e:	    		synthesize error events\n"			\
566 "				d:	    		create a debug log\n"			\
567 "				g[len]:     		synthesize a call chain (use with i or x)\n" \
568 "				l[len]:     		synthesize last branch entries (use with i or x)\n" \
569 "				sNUMBER:    		skip initial number of events\n"		\
570 "				PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
571 "				concatenate multiple options. Default is ibxwpe or cewp\n"
572 
573 static inline
574 void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
575 				       struct perf_time_interval *ptime_range,
576 				       int range_num)
577 {
578 	opts->ptime_range = ptime_range;
579 	opts->range_num = range_num;
580 }
581 
582 static inline
583 void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
584 {
585 	opts->ptime_range = NULL;
586 	opts->range_num = 0;
587 }
588 
589 #else
590 #include "debug.h"
591 
/* No AUX area tracing support: report success (*err = 0) with no recorder. */
static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}
599 
600 static inline
601 void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
602 {
603 }
604 
605 static inline
606 int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
607 			     struct evlist *evlist __maybe_unused,
608 			     struct record_opts *opts __maybe_unused)
609 {
610 	return 0;
611 }
612 
613 #define perf_event__process_auxtrace_info		0
614 #define perf_event__process_auxtrace			0
615 #define perf_event__process_auxtrace_error		0
616 
617 static inline
618 void perf_session__auxtrace_error_inc(struct perf_session *session
619 				      __maybe_unused,
620 				      union perf_event *event
621 				      __maybe_unused)
622 {
623 }
624 
625 static inline
626 void events_stats__auxtrace_error_warn(const struct events_stats *stats
627 				       __maybe_unused)
628 {
629 }
630 
/* Explicitly requesting itrace options without AUX support is an error. */
static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}
639 
static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	/* Snapshot mode not requested: nothing to do, not an error */
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}
650 
651 static inline
652 int auxtrace__process_event(struct perf_session *session __maybe_unused,
653 			    union perf_event *event __maybe_unused,
654 			    struct perf_sample *sample __maybe_unused,
655 			    struct perf_tool *tool __maybe_unused)
656 {
657 	return 0;
658 }
659 
660 static inline
661 int auxtrace__flush_events(struct perf_session *session __maybe_unused,
662 			   struct perf_tool *tool __maybe_unused)
663 {
664 	return 0;
665 }
666 
667 static inline
668 void auxtrace__free_events(struct perf_session *session __maybe_unused)
669 {
670 }
671 
672 static inline
673 void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
674 {
675 }
676 
677 static inline
678 void auxtrace__free(struct perf_session *session __maybe_unused)
679 {
680 }
681 
682 static inline
683 int auxtrace_index__write(int fd __maybe_unused,
684 			  struct list_head *head __maybe_unused)
685 {
686 	return -EINVAL;
687 }
688 
689 static inline
690 int auxtrace_index__process(int fd __maybe_unused,
691 			    u64 size __maybe_unused,
692 			    struct perf_session *session __maybe_unused,
693 			    bool needs_swap __maybe_unused)
694 {
695 	return -EINVAL;
696 }
697 
698 static inline
699 void auxtrace_index__free(struct list_head *head __maybe_unused)
700 {
701 }
702 
703 static inline
704 int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
705 {
706 	return 0;
707 }
708 
709 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
710 			struct auxtrace_mmap_params *mp,
711 			void *userpg, int fd);
712 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
713 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
714 				off_t auxtrace_offset,
715 				unsigned int auxtrace_pages,
716 				bool auxtrace_overwrite);
717 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
718 				   struct evlist *evlist, int idx,
719 				   bool per_cpu);
720 
721 #define ITRACE_HELP ""
722 
723 static inline
724 void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
725 				       __maybe_unused,
726 				       struct perf_time_interval *ptime_range
727 				       __maybe_unused,
728 				       int range_num __maybe_unused)
729 {
730 }
731 
732 static inline
733 void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
734 					 __maybe_unused)
735 {
736 }
737 
738 #endif
739 
740 #endif
741