xref: /linux/tools/perf/util/evlist.h (revision 64af4e0da419ef9e9db0d34a3b5836adbf90a5e8)
1 #ifndef __PERF_EVLIST_H
2 #define __PERF_EVLIST_H 1
3 
4 #include <linux/atomic.h>
5 #include <linux/list.h>
6 #include <api/fd/array.h>
7 #include <stdio.h>
8 #include "../perf.h"
9 #include "event.h"
10 #include "evsel.h"
11 #include "util.h"
12 #include "auxtrace.h"
13 #include <unistd.h>
14 
15 struct pollfd;
16 struct thread_map;
17 struct cpu_map;
18 struct record_opts;
19 
/* Sizing of the per-evlist sample-id hash table (struct perf_evlist::heads). */
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
22 
/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 */
struct perf_mmap {
	void		 *base;		/* mmap'ed area; begins with a struct perf_event_mmap_page (see perf_mmap__read_head/write_tail below) */
	int		 mask;		/* presumably data-area size minus one, used as a wrap-around mask — TODO confirm against evlist.c */
	atomic_t	 refcnt;	/* shared when several evsels redirect output here (see @refcnt note above) */
	u64		 prev;		/* NOTE(review): looks like the last head position seen by the reader — confirm in mmap_read users */
	struct auxtrace_mmap auxtrace_mmap;	/* state for the associated AUX area mapping */
	/* scratch copy area; presumably for events that wrap the ring end — 8-byte aligned for u64 access */
	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
};
36 
/*
 * A list of events (evsels) to be measured together, plus the shared
 * state needed to open, mmap, poll and parse them.
 */
struct perf_evlist {
	struct list_head entries;	/* struct perf_evsel instances, linked via evsel->node */
	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; /* sample id -> perf_sample_id lookup (see perf_evlist__id2sid) */
	int		 nr_entries;	/* number of evsels on 'entries' */
	int		 nr_groups;	/* number of event groups — leaders set via perf_evlist__set_leader */
	int		 nr_mmaps;	/* number of elements in 'mmap' */
	bool		 overwrite;	/* mmap'ed with overwrite semantics (see perf_evlist__mmap) */
	bool		 enabled;	/* toggled by perf_evlist__{enable,disable,toggle_enable} */
	bool		 has_user_cpus;	/* presumably: cpu list came from the user rather than the target — confirm */
	size_t		 mmap_len;	/* length of each ring buffer mapping */
	int		 id_pos;	/* sample id position, set by perf_evlist__set_id_pos */
	int		 is_pos;	/* NOTE(review): companion position to id_pos for non-sample events — confirm */
	u64		 combined_sample_type;	/* cache for perf_evlist__combined_sample_type() presumably */
	struct {
		int	cork_fd;	/* fd used to release the forked workload (see perf_evlist__start_workload) */
		pid_t	pid;		/* pid forked by perf_evlist__prepare_workload */
	} workload;
	struct fdarray	 pollfd;	/* fds polled by perf_evlist__poll */
	struct perf_mmap *mmap;		/* array of nr_mmaps ring buffers */
	struct thread_map *threads;	/* threads to monitor */
	struct cpu_map	  *cpus;	/* cpus to monitor */
	struct perf_evsel *selected;	/* set via perf_evlist__set_selected */
	struct events_stats stats;
	struct perf_env	*env;
};
62 
/*
 * Pairs an event name with an opaque handler pointer, e.g. for
 * attaching per-tracepoint callbacks (cf. perf_evlist__add_newtp).
 */
struct perf_evsel_str_handler {
	const char *name;	/* event name to match */
	void	   *handler;	/* opaque callback; actual type is caller-defined */
};
67 
68 struct perf_evlist *perf_evlist__new(void);
69 struct perf_evlist *perf_evlist__new_default(void);
70 struct perf_evlist *perf_evlist__new_dummy(void);
71 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
72 		       struct thread_map *threads);
73 void perf_evlist__exit(struct perf_evlist *evlist);
74 void perf_evlist__delete(struct perf_evlist *evlist);
75 
76 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
77 void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel);
78 int perf_evlist__add_default(struct perf_evlist *evlist);
79 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
80 				     struct perf_event_attr *attrs, size_t nr_attrs);
81 
/*
 * Convenience wrapper: 'array' must be a real array (not a pointer),
 * otherwise ARRAY_SIZE() yields the wrong element count.
 */
#define perf_evlist__add_default_attrs(evlist, array) \
	__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
84 
85 int perf_evlist__add_dummy(struct perf_evlist *evlist);
86 
87 int perf_evlist__add_newtp(struct perf_evlist *evlist,
88 			   const char *sys, const char *name, void *handler);
89 
90 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
91 int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
92 int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);
93 
94 struct perf_evsel *
95 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
96 
97 struct perf_evsel *
98 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
99 				     const char *name);
100 
101 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
102 			 int cpu, int thread, u64 id);
103 int perf_evlist__id_add_fd(struct perf_evlist *evlist,
104 			   struct perf_evsel *evsel,
105 			   int cpu, int thread, int fd);
106 
107 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
108 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
109 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask);
110 
111 int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
112 
113 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
114 struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
115 						u64 id);
116 
117 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
118 
119 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
120 
121 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
122 
123 int perf_evlist__open(struct perf_evlist *evlist);
124 void perf_evlist__close(struct perf_evlist *evlist);
125 
126 void perf_evlist__set_id_pos(struct perf_evlist *evlist);
127 bool perf_can_sample_identifier(void);
128 bool perf_can_record_switch_events(void);
129 bool perf_can_record_cpu_wide(void);
130 void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts);
131 int record_opts__config(struct record_opts *opts);
132 
133 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
134 				  struct target *target,
135 				  const char *argv[], bool pipe_output,
136 				  void (*exec_error)(int signo, siginfo_t *info,
137 						     void *ucontext));
138 int perf_evlist__start_workload(struct perf_evlist *evlist);
139 
140 struct option;
141 
142 int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
143 int perf_evlist__parse_mmap_pages(const struct option *opt,
144 				  const char *str,
145 				  int unset);
146 
147 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
148 			 bool overwrite, unsigned int auxtrace_pages,
149 			 bool auxtrace_overwrite);
150 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
151 		      bool overwrite);
152 void perf_evlist__munmap(struct perf_evlist *evlist);
153 
154 void perf_evlist__disable(struct perf_evlist *evlist);
155 void perf_evlist__enable(struct perf_evlist *evlist);
156 void perf_evlist__toggle_enable(struct perf_evlist *evlist);
157 
158 int perf_evlist__disable_event(struct perf_evlist *evlist,
159 			       struct perf_evsel *evsel);
160 int perf_evlist__enable_event(struct perf_evlist *evlist,
161 			      struct perf_evsel *evsel);
162 int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
163 				  struct perf_evsel *evsel, int idx);
164 
165 void perf_evlist__set_selected(struct perf_evlist *evlist,
166 			       struct perf_evsel *evsel);
167 
168 void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
169 			   struct thread_map *threads);
170 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
171 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
172 
173 void __perf_evlist__set_leader(struct list_head *list);
174 void perf_evlist__set_leader(struct perf_evlist *evlist);
175 
176 u64 perf_evlist__read_format(struct perf_evlist *evlist);
177 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
178 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
179 u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist);
180 bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
181 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
182 
183 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
184 			      struct perf_sample *sample);
185 
186 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
187 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
188 bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
189 
190 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
191 				   struct list_head *list);
192 
193 static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
194 {
195 	return list_entry(evlist->entries.next, struct perf_evsel, node);
196 }
197 
198 static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
199 {
200 	return list_entry(evlist->entries.prev, struct perf_evsel, node);
201 }
202 
203 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
204 
205 int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
206 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
207 
/*
 * Read the producer (kernel-written) position of the ring buffer.
 *
 * Pairs with perf_mmap__write_tail(): the read barrier ensures that
 * any subsequent reads of the ring data happen after the head load,
 * so we only consume data the kernel published up to 'head'.
 */
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	u64 head = ACCESS_ONCE(pc->data_head);	/* single, non-torn load of the shared counter */
	rmb();
	return head;
}
215 
/*
 * Publish the consumer position: tells the kernel the ring data up to
 * 'tail' has been consumed and its space may be reused.
 */
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	mb();
	pc->data_tail = tail;
}
226 
227 bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
228 void perf_evlist__to_front(struct perf_evlist *evlist,
229 			   struct perf_evsel *move_evsel);
230 
/**
 * __evlist__for_each - iterate through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each(list, evsel) \
        list_for_each_entry(evsel, list, node)

/**
 * evlist__for_each - iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each(evlist, evsel) \
	__evlist__for_each(&(evlist)->entries, evsel)
246 
/**
 * __evlist__for_each_continue - continue iteration through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator, positioned at the current element
 */
#define __evlist__for_each_continue(list, evsel) \
        list_for_each_entry_continue(evsel, list, node)

/**
 * evlist__for_each_continue - continue iteration through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator, positioned at the current element
 */
#define evlist__for_each_continue(evlist, evsel) \
	__evlist__for_each_continue(&(evlist)->entries, evsel)
262 
/**
 * __evlist__for_each_reverse - iterate through all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_reverse(list, evsel) \
        list_for_each_entry_reverse(evsel, list, node)

/**
 * evlist__for_each_reverse - iterate through all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_reverse(evlist, evsel) \
	__evlist__for_each_reverse(&(evlist)->entries, evsel)
278 
/**
 * __evlist__for_each_safe - safely iterate through all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct perf_evsel temp iterator
 * @evsel: struct perf_evsel iterator; may be removed from the list while iterating
 */
#define __evlist__for_each_safe(list, tmp, evsel) \
        list_for_each_entry_safe(evsel, tmp, list, node)

/**
 * evlist__for_each_safe - safely iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator; may be removed from the list while iterating
 * @tmp: struct perf_evsel temp iterator
 */
#define evlist__for_each_safe(evlist, tmp, evsel) \
	__evlist__for_each_safe(&(evlist)->entries, tmp, evsel)
296 
297 void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
298 				     struct perf_evsel *tracking_evsel);
299 
300 void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
301 #endif /* __PERF_EVLIST_H */
302