xref: /linux/tools/perf/util/evlist.h (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
#include <stdio.h>
#include "../perf.h"
#include "event.h"
#include "evsel.h"
#include "mmap.h"
#include "util.h"
#include <signal.h>
#include <unistd.h>

struct pollfd;
struct thread_map;
struct cpu_map;
struct record_opts;

#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)

struct perf_evlist {
	struct list_head entries;
	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
	int		 nr_entries;
	int		 nr_groups;
	int		 nr_mmaps;
	bool		 enabled;
	bool		 has_user_cpus;
	size_t		 mmap_len;
	int		 id_pos;
	int		 is_pos;
	u64		 combined_sample_type;
	enum bkw_mmap_state bkw_mmap_state;
	struct {
		int	cork_fd;
		pid_t	pid;
	} workload;
	struct fdarray	 pollfd;
	struct perf_mmap *mmap;
	struct perf_mmap *overwrite_mmap;
	struct thread_map *threads;
	struct cpu_map	  *cpus;
	struct perf_evsel *selected;
	struct events_stats stats;
	struct perf_env	*env;
	u64		first_sample_time;
	u64		last_sample_time;
};

struct perf_evsel_str_handler {
	const char *name;
	void	   *handler;
};

struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
struct perf_evlist *perf_evlist__new_dummy(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
void perf_evlist__delete(struct perf_evlist *evlist);

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel);

int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise);

static inline int perf_evlist__add_default(struct perf_evlist *evlist)
{
	return __perf_evlist__add_default(evlist, true);
}
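
/*
 * Example: a minimal sketch of building an evlist with the default event and
 * tearing it down again; the int-returning caller and abbreviated error
 * handling are assumptions for illustration:
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__add_default(evlist) < 0)
 *		goto out_delete;
 *	...
 * out_delete:
 *	perf_evlist__delete(evlist);
 */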

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs);

#define perf_evlist__add_default_attrs(evlist, array) \
	__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
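
/*
 * Example: a sketch of adding a fixed set of events through the ARRAY_SIZE()
 * wrapper above; the attrs[] contents are purely illustrative:
 *
 *	struct perf_event_attr attrs[] = {
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
 *	};
 *
 *	if (perf_evlist__add_default_attrs(evlist, attrs) < 0)
 *		pr_err("Cannot set up default attrs\n");
 */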

int perf_evlist__add_dummy(struct perf_evlist *evlist);

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler);

void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit);

#define perf_evlist__set_sample_bit(evlist, bit) \
	__perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)

#define perf_evlist__reset_sample_bit(evlist, bit) \
	__perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)
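
/*
 * Example: the token-pasting macros above take the PERF_SAMPLE_* suffix, so
 * requesting CPU and TIME in every sample of the evlist looks like:
 *
 *	perf_evlist__set_sample_bit(evlist, CPU);
 *	perf_evlist__set_sample_bit(evlist, TIME);
 */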

int perf_evlist__set_tp_filter(struct perf_evlist *evlist, const char *filter);
int perf_evlist__set_tp_filter_pid(struct perf_evlist *evlist, pid_t pid);
int perf_evlist__set_tp_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);
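
/*
 * Example: a sketch of installing a tracepoint filter; the expression is
 * illustrative and assumes the common_pid field that tracepoints expose:
 *
 *	if (perf_evlist__set_tp_filter(evlist, "common_pid != 0") < 0)
 *		pr_debug("Failed to set a tracepoint filter\n");
 */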

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name);

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id);
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd);

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask);

int perf_evlist__poll(struct perf_evlist *evlist, int timeout);

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id);

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
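
/*
 * Example: a sketch of mapping a decoded sample back to the evsel that
 * produced it; assumes PERF_SAMPLE_ID (or IDENTIFIER) was requested so that
 * sample.id is meaningful:
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample.id);
 *
 *	if (evsel == NULL)
 *		return -1;
 */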

void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);

int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);
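
/*
 * Example: a sketch of opening the events and turning a failure into a human
 * readable message with the strerror helper declared further down; passing
 * errno and the BUFSIZ-sized buffer are assumptions for illustration:
 *
 *	char errbuf[BUFSIZ];
 *
 *	if (perf_evlist__open(evlist) < 0) {
 *		perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
 *		pr_err("%s\n", errbuf);
 *	}
 */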

struct callchain_param;

void perf_evlist__set_id_pos(struct perf_evlist *evlist);
bool perf_can_sample_identifier(void);
bool perf_can_record_switch_events(void);
bool perf_can_record_cpu_wide(void);
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info,
						     void *ucontext));
int perf_evlist__start_workload(struct perf_evlist *evlist);
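
/*
 * Example: a sketch of the usual workload ordering: fork the workload first
 * (it stays parked until released), set up the events, then let it run.
 * The target/argv values, the NULL exec_error handler and the omission of
 * error handling are all assumptions for illustration:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 */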

struct option;

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int perf_evlist__parse_mmap_pages(const struct option *opt,
				  const char *str,
				  int unset);

unsigned long perf_event_mlock_kb_in_pages(void);

int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
			 bool auxtrace_overwrite, int nr_cblocks);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);

size_t perf_evlist__mmap_size(unsigned long pages);
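
/*
 * Example: a sketch of a simple event loop around the ring buffers; UINT_MAX
 * is assumed to select the default mmap size (see perf_evlist__mmap_size()),
 * the "done" flag and 100ms poll timeout are illustrative, and reading the
 * events out of evlist->mmap[0..nr_mmaps - 1] via the perf_mmap API from
 * mmap.h is elided:
 *
 *	if (perf_evlist__mmap(evlist, UINT_MAX) < 0)
 *		return -1;
 *
 *	while (!done) {
 *		... drain the per-cpu/per-thread ring buffers ...
 *		perf_evlist__poll(evlist, 100);
 *	}
 *
 *	perf_evlist__munmap(evlist);
 */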

void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
void perf_evlist__toggle_enable(struct perf_evlist *evlist);
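
/*
 * Example: counting only around a region of interest, assuming the events
 * were opened with attr->disabled set so nothing counts until enabled:
 *
 *	perf_evlist__enable(evlist);
 *	run_workload();		// hypothetical workload function
 *	perf_evlist__disable(evlist);
 */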

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx);

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel);

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
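
/*
 * Example: a sketch of applying the per-evsel filters and naming the
 * offending event on failure; perf_evsel__name() comes from evsel.h:
 *
 *	struct perf_evsel *pos;
 *
 *	if (perf_evlist__apply_filters(evlist, &pos) < 0)
 *		pr_err("failed to set filter on %s\n", perf_evsel__name(pos));
 */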

void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);

u64 perf_evlist__read_format(struct perf_evlist *evlist);
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample);

int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
					union perf_event *event,
					u64 *timestamp);
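
/*
 * Example: a sketch of decoding a PERF_RECORD_SAMPLE pulled off a ring
 * buffer; which perf_sample fields are valid depends on the requested
 * sample_type, and PRIu64 assumes <inttypes.h>:
 *
 *	struct perf_sample sample;
 *
 *	if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
 *		printf("cpu %u pid %u time %" PRIu64 "\n",
 *		       sample.cpu, sample.pid, sample.time);
 */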

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list);

static inline bool perf_evlist__empty(struct perf_evlist *evlist)
{
	return list_empty(&evlist->entries);
}

static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
	return list_entry(evlist->entries.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
{
	return list_entry(evlist->entries.prev, struct perf_evsel, node);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);

int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);

bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel);

/**
 * __evlist__for_each_entry - iterate through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
	list_for_each_entry(evsel, list, node)

/**
 * evlist__for_each_entry - iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
	__evlist__for_each_entry(&(evlist)->entries, evsel)
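
/*
 * Example: a sketch of walking all events in the evlist; perf_evsel__name()
 * comes from evsel.h:
 *
 *	struct perf_evsel *evsel;
 *
 *	evlist__for_each_entry(evlist, evsel)
 *		fprintf(stderr, "%s\n", perf_evsel__name(evsel));
 */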

/**
 * __evlist__for_each_entry_continue - continue iteration through all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_continue(list, evsel) \
	list_for_each_entry_continue(evsel, list, node)

/**
 * evlist__for_each_entry_continue - continue iteration through all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
	__evlist__for_each_entry_continue(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
	list_for_each_entry_reverse(evsel, list, node)

/**
 * evlist__for_each_entry_reverse - iterate through all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
	__evlist__for_each_entry_reverse(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate through all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct perf_evsel temp iterator
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
	list_for_each_entry_safe(evsel, tmp, list, node)

/**
 * evlist__for_each_entry_safe - safely iterate through all the evsels
 * @evlist: evlist instance to iterate
 * @tmp: struct perf_evsel temp iterator
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
	__evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)
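
/*
 * Example: a sketch of unlinking events while walking the list; the _safe
 * variant is needed because perf_evlist__remove() takes the current node off
 * the list, and the exclude_kernel predicate is illustrative:
 *
 *	struct perf_evsel *evsel, *tmp;
 *
 *	evlist__for_each_entry_safe(evlist, tmp, evsel) {
 *		if (evsel->attr.exclude_kernel)
 *			perf_evlist__remove(evlist, evsel);
 *	}
 */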

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel);

void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);

struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event);

bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);

void perf_evlist__force_leader(struct perf_evlist *evlist);

struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
						 struct perf_evsel *evsel);

#endif /* __PERF_EVLIST_H */