xref: /linux/tools/perf/util/auxtrace.h (revision 75bf465f0bc33e9b776a46d6a1b9b990f5fb7c37)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * auxtrace.h: AUX area trace support
4  * Copyright (c) 2013-2015, Intel Corporation.
5  */
6 
7 #ifndef __PERF_AUXTRACE_H
8 #define __PERF_AUXTRACE_H
9 
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/bitsperlong.h>

#include "../perf.h"
#include "event.h"
#include "session.h"
#include "debug.h"
23 
/* Forward declarations so this header need not pull in the full definitions. */
union perf_event;
struct perf_session;
struct perf_evlist;
struct perf_tool;
struct perf_mmap;
struct option;
struct record_opts;
struct auxtrace_info_event;
struct events_stats;

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

/* Identifies which hardware tracing implementation produced the AUX data. */
enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

/* Unit in which the period of synthesized 'instructions' events is expressed. */
enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};
51 
/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instructions' events
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type (the unit of @period)
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 */
struct itrace_synth_opts {
	bool			set;
	bool			default_no_sample;
	bool			inject;
	bool			instructions;
	bool			branches;
	bool			transactions;
	bool			ptwrites;
	bool			pwr_events;
	bool			errors;
	bool			dont_decode;
	bool			log;
	bool			calls;
	bool			returns;
	bool			callchain;
	bool			thread_stack;
	bool			last_branch;
	unsigned int		callchain_sz;
	unsigned int		last_branch_sz;
	unsigned long long	period;
	enum itrace_period_type	period_type;
	unsigned long		initial_skip;
	unsigned long		*cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int			range_num;
};
106 
/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64			file_offset;
	u64			sz;
};

/* Capacity of one auxtrace_index array; further arrays are chained via @list. */
#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head	list;
	size_t			nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};
132 
/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 *
 * An instance is attached to struct perf_session (see the auxtrace__*()
 * wrappers below, which treat a NULL pointer as "no decoder").
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
};
154 
/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 *
 * Data access goes through auxtrace_buffer__get_data()/__put_data() and a
 * buffer is released with auxtrace_buffer__free() (declared below).
 */
struct auxtrace_buffer {
	struct list_head	list;
	size_t			size;
	pid_t			pid;
	pid_t			tid;
	int			cpu;
	void			*data;
	off_t			data_offset;
	void			*mmap_addr;
	size_t			mmap_size;
	bool			data_needs_freeing;
	bool			consecutive;
	u64			offset;
	u64			reference;
	u64			buffer_nr;
	size_t			use_size;
	void			*use_data;
};
196 
/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 *
 * A queue holds data either for one thread or for one cpu, never both.
 */
struct auxtrace_queue {
	struct list_head	head;
	pid_t			tid;
	int			cpu;
	bool			set;
	void			*priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue	*queue_array;
	unsigned int		nr_queues;
	bool			new_data;
	bool			populated;
	u64			next_buffer_nr;
};
228 
/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int		queue_nr;
	u64			ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 *
 * Typically used to pick the queue with the earliest timestamp next.
 */
struct auxtrace_heap {
	struct auxtrace_heap_item	*heap_array;
	unsigned int		heap_cnt;
	unsigned int		heap_sz;
};
251 
/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void		*base;
	void		*userpg;
	size_t		mask;
	size_t		len;
	u64		prev;
	int		idx;
	pid_t		tid;
	int		cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 *
 * Filled in by auxtrace_mmap_params__init() / __set_idx() (declared below).
 */
struct auxtrace_mmap_params {
	size_t		mask;
	off_t		offset;
	size_t		len;
	int		prot;
	int		idx;
	pid_t		tid;
	int		cpu;
};
295 
/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 *
 * Instances are obtained via auxtrace_record__init() (declared below).
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct perf_evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct perf_evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct auxtrace_info_event *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
};
333 
/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 *
 * Built from filter strings, see addr_filters__parse_bare_filter() below.
 */
struct addr_filter {
	struct list_head	list;
	bool			range;
	bool			start;
	const char		*action;
	const char		*sym_from;
	const char		*sym_to;
	int			sym_from_idx;
	int			sym_to_idx;
	u64			addr;
	u64			size;
	const char		*filename;
	char			*str;
};

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head	head;
	int			cnt;
};
375 
376 #ifdef HAVE_AUXTRACE_SUPPORT
377 
/*
 * In snapshot mode the mmapped page is read-only which makes using
 * __sync_val_compare_and_swap() problematic.  However, snapshot mode expects
 * the buffer is not updated while the snapshot is made (e.g. Intel PT disables
 * the event) so there is not a race anyway.
 */
/* Read aux_head with a plain load - safe in snapshot mode per the above. */
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}
393 
/* Read the kernel's AUX area write position (aux_head). */
static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	/*
	 * On 32-bit a plain load of the 64-bit aux_head could tear, so use a
	 * compare-and-swap with itself for an atomic read when available.
	 */
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 head = READ_ONCE(pc->aux_head);
#else
	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}
407 
/* Publish the consumer's read position (aux_tail) back to the kernel. */
static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 old_tail;
#endif

	/* Ensure all reads are done before we write the tail out */
	mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pc->aux_tail = tail;
#else
	/* Emulate an atomic 64-bit store on 32-bit via compare-and-swap. */
	do {
		old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
	} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}
425 
/* Set up / tear down the AUX area mmap used while recording. */
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu);

/*
 * Callback used by auxtrace_mmap__read*() to consume trace data; the two
 * data/len pairs allow data that is split into two ranges to be passed.
 */
typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct perf_mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);
450 
/* AUX area data queue and buffer management. */
int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);

/* Heap ordering of queues (lowest ordinal first, see struct auxtrace_heap). */
int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);

/* Embedded in cached objects; @key is the hash lookup key. */
struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);

struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
					      int *err);

/* Wrappers around the struct auxtrace_record callbacks. */
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct perf_evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct perf_evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct auxtrace_info_event *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);

/* perf.data auxtrace index handling (see struct auxtrace_index). */
int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

/* Synthesis / processing of auxtrace-related perf events. */
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process);
int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

/* Address filter parsing (see struct addr_filter above). */
void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct perf_evlist *evlist);
544 
545 static inline int auxtrace__process_event(struct perf_session *session,
546 					  union perf_event *event,
547 					  struct perf_sample *sample,
548 					  struct perf_tool *tool)
549 {
550 	if (!session->auxtrace)
551 		return 0;
552 
553 	return session->auxtrace->process_event(session, event, sample, tool);
554 }
555 
556 static inline int auxtrace__flush_events(struct perf_session *session,
557 					 struct perf_tool *tool)
558 {
559 	if (!session->auxtrace)
560 		return 0;
561 
562 	return session->auxtrace->flush_events(session, tool);
563 }
564 
565 static inline void auxtrace__free_events(struct perf_session *session)
566 {
567 	if (!session->auxtrace)
568 		return;
569 
570 	return session->auxtrace->free_events(session);
571 }
572 
573 static inline void auxtrace__free(struct perf_session *session)
574 {
575 	if (!session->auxtrace)
576 		return;
577 
578 	return session->auxtrace->free(session);
579 }
580 
/* Shared help text describing the --itrace option syntax. */
#define ITRACE_HELP \
"				i:	    		synthesize instructions events\n"		\
"				b:	    		synthesize branches events\n"		\
"				c:	    		synthesize branches events (calls only)\n"	\
"				r:	    		synthesize branches events (returns only)\n" \
"				x:	    		synthesize transactions events\n"		\
"				w:	    		synthesize ptwrite events\n"		\
"				p:	    		synthesize power events\n"			\
"				e:	    		synthesize error events\n"			\
"				d:	    		create a debug log\n"			\
"				g[len]:     		synthesize a call chain (use with i or x)\n" \
"				l[len]:     		synthesize last branch entries (use with i or x)\n" \
"				sNUMBER:    		skip initial number of events\n"		\
"				PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
"				concatenate multiple options. Default is ibxwpe or cewp\n"
596 
597 static inline
598 void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
599 				       struct perf_time_interval *ptime_range,
600 				       int range_num)
601 {
602 	opts->ptime_range = ptime_range;
603 	opts->range_num = range_num;
604 }
605 
606 static inline
607 void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
608 {
609 	opts->ptime_range = NULL;
610 	opts->range_num = 0;
611 }
612 
613 #else
614 
/* Stubs used when perf is built without HAVE_AUXTRACE_SUPPORT. */
static inline struct auxtrace_record *
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused,
		      int *err)
{
	/* Not an error: recording simply proceeds without AUX area tracing. */
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline int
perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
				     struct perf_tool *tool __maybe_unused,
				     struct perf_session *session __maybe_unused,
				     perf_event__handler_t process __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct perf_evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}
644 
/* No handlers for auxtrace events when support is not compiled in. */
#define perf_event__process_auxtrace_info		0
#define perf_event__process_auxtrace			0
#define perf_event__process_auxtrace_error		0

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

/* Reject --itrace outright when AUX area tracing is not compiled in. */
static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	/* Only an error if snapshot options were actually given. */
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}
682 
/* Stub decoder hooks: without auxtrace support there is nothing to do. */
static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

/* Index I/O is an error without auxtrace support. */
static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
int auxtrace_parse_filters(struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
740 
/*
 * The AUX area mmap helpers are declared in both configurations
 * (presumably implemented unconditionally - confirm in auxtrace.c).
 */
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu);

/* No --itrace help text when AUX area tracing is not compiled in. */
#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}
769 
770 #endif
771 
772 #endif
773