1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __PERF_EVLIST_H
3 #define __PERF_EVLIST_H 1
4
5 #include <linux/compiler.h>
6 #include <linux/kernel.h>
7 #include <linux/refcount.h>
8 #include <linux/list.h>
9 #include <api/fd/array.h>
10 #include <internal/evlist.h>
11 #include <internal/evsel.h>
12 #include <perf/evlist.h>
13 #include "affinity.h"
14 #include "events_stats.h"
15 #include "evsel.h"
16 #include "rblist.h"
17 #include <pthread.h>
18 #include <signal.h>
19 #include <unistd.h>
20
21 struct pollfd;
22 struct thread_map;
23 struct perf_cpu_map;
24 struct perf_stat_config;
25 struct record_opts;
26 struct strbuf;
27 struct target;
28
29 /*
30 * State machine of bkw_mmap_state:
31 *
32 * .________________(forbid)_____________.
33 * | V
34 * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
35 * ^ ^ | ^ |
36 * | |__(forbid)____/ |___(forbid)___/|
37 * | |
38 * \_________________(3)_______________/
39 *
40 * NOTREADY : Backward ring buffers are not ready
41 * RUNNING : Backward ring buffers are recording
42 * DATA_PENDING : We are required to collect data from backward ring buffers
43 * EMPTY : We have collected data from backward ring buffers.
44 *
45 * (0): Setup backward ring buffer
46 * (1): Pause ring buffers for reading
47 * (2): Read from ring buffers
48 * (3): Resume ring buffers for recording
49 */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,	/* Backward ring buffers are not ready */
	BKW_MMAP_RUNNING,	/* Backward ring buffers are recording */
	BKW_MMAP_DATA_PENDING,	/* Required to collect data from backward ring buffers */
	BKW_MMAP_EMPTY,		/* Data has been collected from backward ring buffers */
};
56
57 struct event_enable_timer;
58
struct evlist {
	struct perf_evlist core;	/* libperf evlist this wraps */
	bool		 enabled;	/* toggled by evlist__(enable|disable|toggle_enable)() */
	bool		 no_affinity;
	int		 id_pos;	/* presumably maintained via evlist__set_id_pos() — confirm in evlist.c */
	int		 is_pos;
	int		 nr_br_cntr;	/* presumably updated by evlist__update_br_cntr() — confirm in evlist.c */
	u64		 combined_sample_type;	/* see evlist__combined_sample_type() */
	enum bkw_mmap_state bkw_mmap_state;	/* backward ring buffer state machine, see comment above */
	struct {
		int	cork_fd;	/* fd holding the workload corked until evlist__start_workload() */
		pid_t	pid;		/* pid of the workload, see evlist__prepare_workload() */
	} workload;
	struct mmap *mmap;
	struct mmap *overwrite_mmap;	/* ring buffers mapped for overwrite (backward) mode */
	struct evsel *selected;		/* set via evlist__set_selected() */
	struct events_stats stats;
	struct perf_session *session;
	/* Optional hook to dump an event's raw payload, e.g. for tracing formats. */
	void (*trace_event_sample_raw)(struct evlist *evlist,
				       union perf_event *event,
				       struct perf_sample *sample);
	u64		first_sample_time;	/* timestamp of the first sample seen */
	u64		last_sample_time;	/* timestamp of the last sample seen */
	struct {
		pthread_t	th;	/* side-band thread, see evlist__start_sb_thread() */
		volatile int	done;	/* set to ask the thread to exit */
	} thread;
	struct {
		int	fd;	/* control file descriptor */
		int	ack;	/* ack file descriptor for control commands */
		int	pos;	/* index at evlist core object to check signals */
	} ctl_fd;
	struct event_enable_timer *eet;	/* see evlist__parse_event_enable_time() */
	/**
	 * @metric_events: A list of struct metric_event which each have a list
	 * of struct metric_expr.
	 */
	struct rblist	metric_events;
	/* samples with deferred_callchain would wait here. */
	struct list_head deferred_samples;
};
100
/*
 * Associates an event name with an opaque handler pointer; arrays of these
 * are consumed by __evlist__set_tracepoints_handlers().
 */
struct evsel_str_handler {
	const char *name;	/* event name to look up in the evlist */
	void	   *handler;	/* opaque handler assigned to the matching evsel */
};
105
106 struct evlist *evlist__new(void);
107 struct evlist *evlist__new_default(void);
108 struct evlist *evlist__new_dummy(void);
109 void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
110 struct perf_thread_map *threads);
111 void evlist__exit(struct evlist *evlist);
112 void evlist__delete(struct evlist *evlist);
113
114 void evlist__add(struct evlist *evlist, struct evsel *entry);
115 void evlist__remove(struct evlist *evlist, struct evsel *evsel);
116
117 int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs);
118 int arch_evlist__add_required_events(struct list_head *list);
119
120 int evlist__add_dummy(struct evlist *evlist);
121 struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide);
evlist__add_dummy_on_all_cpus(struct evlist * evlist)122 static inline struct evsel *evlist__add_dummy_on_all_cpus(struct evlist *evlist)
123 {
124 return evlist__add_aux_dummy(evlist, true);
125 }
126 #ifdef HAVE_LIBTRACEEVENT
127 struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide);
128 #endif
129
130 int evlist__add_sb_event(struct evlist *evlist, struct perf_event_attr *attr,
131 evsel__sb_cb_t cb, void *data);
132 void evlist__set_cb(struct evlist *evlist, evsel__sb_cb_t cb, void *data);
133 int evlist__start_sb_thread(struct evlist *evlist, struct target *target);
134 void evlist__stop_sb_thread(struct evlist *evlist);
135
136 #ifdef HAVE_LIBTRACEEVENT
137 int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler);
138 #endif
139
140 int __evlist__set_tracepoints_handlers(struct evlist *evlist,
141 const struct evsel_str_handler *assocs,
142 size_t nr_assocs);
143
144 #define evlist__set_tracepoints_handlers(evlist, array) \
145 __evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
146
147 int evlist__set_tp_filter(struct evlist *evlist, const char *filter);
148 int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids);
149
150 int evlist__append_tp_filter(struct evlist *evlist, const char *filter);
151
152 int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid);
153 int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids);
154
155 struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name);
156
157 int evlist__add_pollfd(struct evlist *evlist, int fd);
158 int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);
159
160 #ifdef HAVE_EVENTFD_SUPPORT
161 int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd);
162 #endif
163
164 int evlist__poll(struct evlist *evlist, int timeout);
165
166 struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id);
167 struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id);
168
169 struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id);
170
171 void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state);
172
173 void evlist__mmap_consume(struct evlist *evlist, int idx);
174
175 int evlist__open(struct evlist *evlist);
176 void evlist__close(struct evlist *evlist);
177
178 struct callchain_param;
179
180 void evlist__set_id_pos(struct evlist *evlist);
181 void evlist__config(struct evlist *evlist, struct record_opts *opts, struct callchain_param *callchain);
182 int record_opts__config(struct record_opts *opts);
183
184 int evlist__prepare_workload(struct evlist *evlist, struct target *target,
185 const char *argv[], bool pipe_output,
186 void (*exec_error)(int signo, siginfo_t *info, void *ucontext));
187 int evlist__start_workload(struct evlist *evlist);
188 void evlist__cancel_workload(struct evlist *evlist);
189
190 struct option;
191
192 int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
193 int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset);
194
195 unsigned long perf_event_mlock_kb_in_pages(void);
196
197 int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
198 unsigned int auxtrace_pages,
199 bool auxtrace_overwrite, int nr_cblocks,
200 int affinity, int flush, int comp_level);
201 int evlist__mmap(struct evlist *evlist, unsigned int pages);
202 void evlist__munmap(struct evlist *evlist);
203
204 size_t evlist__mmap_size(unsigned long pages);
205
206 void evlist__disable(struct evlist *evlist);
207 void evlist__enable(struct evlist *evlist);
208 void evlist__toggle_enable(struct evlist *evlist);
209 void evlist__disable_evsel(struct evlist *evlist, char *evsel_name);
210 void evlist__enable_evsel(struct evlist *evlist, char *evsel_name);
211 void evlist__disable_non_dummy(struct evlist *evlist);
212 void evlist__enable_non_dummy(struct evlist *evlist);
213
214 void evlist__set_selected(struct evlist *evlist, struct evsel *evsel);
215
216 int evlist__create_maps(struct evlist *evlist, struct target *target);
217 int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
218 struct target *target);
219
220 u64 __evlist__combined_sample_type(struct evlist *evlist);
221 u64 evlist__combined_sample_type(struct evlist *evlist);
222 u64 evlist__combined_branch_type(struct evlist *evlist);
223 void evlist__update_br_cntr(struct evlist *evlist);
224 bool evlist__sample_id_all(struct evlist *evlist);
225 u16 evlist__id_hdr_size(struct evlist *evlist);
226
227 int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample);
228 int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp);
229
230 bool evlist__valid_sample_type(struct evlist *evlist);
231 bool evlist__valid_sample_id_all(struct evlist *evlist);
232 bool evlist__valid_read_format(struct evlist *evlist);
233
234 void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list);
235
evlist__empty(struct evlist * evlist)236 static inline bool evlist__empty(struct evlist *evlist)
237 {
238 return list_empty(&evlist->core.entries);
239 }
240
evlist__first(struct evlist * evlist)241 static inline struct evsel *evlist__first(struct evlist *evlist)
242 {
243 struct perf_evsel *evsel = perf_evlist__first(&evlist->core);
244
245 return container_of(evsel, struct evsel, core);
246 }
247
evlist__last(struct evlist * evlist)248 static inline struct evsel *evlist__last(struct evlist *evlist)
249 {
250 struct perf_evsel *evsel = perf_evlist__last(&evlist->core);
251
252 return container_of(evsel, struct evsel, core);
253 }
254
evlist__nr_groups(struct evlist * evlist)255 static inline int evlist__nr_groups(struct evlist *evlist)
256 {
257 return perf_evlist__nr_groups(&evlist->core);
258 }
259
260 int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size);
261 int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size);
262
263 bool evlist__can_select_event(struct evlist *evlist, const char *str);
264 void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel);
265
266 /**
267 * __evlist__for_each_entry - iterate thru all the evsels
268 * @list: list_head instance to iterate
269 * @evsel: struct evsel iterator
270 */
271 #define __evlist__for_each_entry(list, evsel) \
272 list_for_each_entry(evsel, list, core.node)
273
274 /**
275 * evlist__for_each_entry - iterate thru all the evsels
276 * @evlist: evlist instance to iterate
277 * @evsel: struct evsel iterator
278 */
279 #define evlist__for_each_entry(evlist, evsel) \
280 __evlist__for_each_entry(&(evlist)->core.entries, evsel)
281
282 /**
283 * __evlist__for_each_entry_continue - continue iteration thru all the evsels
284 * @list: list_head instance to iterate
285 * @evsel: struct evsel iterator
286 */
287 #define __evlist__for_each_entry_continue(list, evsel) \
288 list_for_each_entry_continue(evsel, list, core.node)
289
290 /**
291 * evlist__for_each_entry_continue - continue iteration thru all the evsels
292 * @evlist: evlist instance to iterate
293 * @evsel: struct evsel iterator
294 */
295 #define evlist__for_each_entry_continue(evlist, evsel) \
296 __evlist__for_each_entry_continue(&(evlist)->core.entries, evsel)
297
298 /**
299 * __evlist__for_each_entry_from - continue iteration from @evsel (included)
300 * @list: list_head instance to iterate
301 * @evsel: struct evsel iterator
302 */
303 #define __evlist__for_each_entry_from(list, evsel) \
304 list_for_each_entry_from(evsel, list, core.node)
305
306 /**
307 * evlist__for_each_entry_from - continue iteration from @evsel (included)
308 * @evlist: evlist instance to iterate
309 * @evsel: struct evsel iterator
310 */
311 #define evlist__for_each_entry_from(evlist, evsel) \
312 __evlist__for_each_entry_from(&(evlist)->core.entries, evsel)
313
314 /**
315 * __evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
316 * @list: list_head instance to iterate
317 * @evsel: struct evsel iterator
318 */
319 #define __evlist__for_each_entry_reverse(list, evsel) \
320 list_for_each_entry_reverse(evsel, list, core.node)
321
322 /**
323 * evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
324 * @evlist: evlist instance to iterate
325 * @evsel: struct evsel iterator
326 */
327 #define evlist__for_each_entry_reverse(evlist, evsel) \
328 __evlist__for_each_entry_reverse(&(evlist)->core.entries, evsel)
329
330 /**
331 * __evlist__for_each_entry_safe - safely iterate thru all the evsels
332 * @list: list_head instance to iterate
333 * @tmp: struct evsel temp iterator
334 * @evsel: struct evsel iterator
335 */
336 #define __evlist__for_each_entry_safe(list, tmp, evsel) \
337 list_for_each_entry_safe(evsel, tmp, list, core.node)
338
339 /**
340 * evlist__for_each_entry_safe - safely iterate thru all the evsels
341 * @evlist: evlist instance to iterate
342 * @evsel: struct evsel iterator
343 * @tmp: struct evsel temp iterator
344 */
345 #define evlist__for_each_entry_safe(evlist, tmp, evsel) \
346 __evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel)
347
348 /** Iterator state for evlist__for_each_cpu */
struct evlist_cpu_iterator {
	/** The list being iterated through. */
	struct evlist *container;
	/** The current evsel of the iterator. */
	struct evsel *evsel;
	/** The CPU map index corresponding to the evsel->core.cpus for the current CPU. */
	int cpu_map_idx;
	/**
	 * The CPU map index corresponding to evlist->core.all_cpus for the
	 * current CPU. Distinct from cpu_map_idx as the evsel's cpu map may
	 * contain fewer entries.
	 */
	int evlist_cpu_map_idx;
	/** The number of CPU map entries in evlist->core.all_cpus. */
	int evlist_cpu_map_nr;
	/** The current CPU of the iterator. */
	struct perf_cpu cpu;
	/** If present, used to set the affinity when switching between CPUs. */
	struct affinity *affinity;
	/** May be used to hold affinity state prior to iterating. */
	struct affinity saved_affinity;
};
371
372 /**
373 * evlist__for_each_cpu - without affinity, iterate over the evlist. With
374 * affinity, iterate over all CPUs and then the evlist
375 * for each evsel on that CPU. When switching between
376 * CPUs the affinity is set to the CPU to avoid IPIs
377 * during syscalls. The affinity is set up and removed
378 * automatically, if the loop is broken a call to
379 * evlist_cpu_iterator__exit is necessary.
380 * @evlist_cpu_itr: the iterator instance.
381 * @evlist: evlist instance to iterate.
382 */
383 #define evlist__for_each_cpu(evlist_cpu_itr, evlist) \
384 for (evlist_cpu_iterator__init(&(evlist_cpu_itr), evlist); \
385 !evlist_cpu_iterator__end(&evlist_cpu_itr); \
386 evlist_cpu_iterator__next(&evlist_cpu_itr))
387
388 /** Setup an iterator set to the first CPU/evsel of evlist. */
389 void evlist_cpu_iterator__init(struct evlist_cpu_iterator *itr, struct evlist *evlist);
390 /**
391 * Cleans up the iterator, automatically done by evlist_cpu_iterator__next when
392 * the end of the list is reached. Multiple calls are safe.
393 */
394 void evlist_cpu_iterator__exit(struct evlist_cpu_iterator *itr);
395 /** Move to next element in iterator, updating CPU, evsel and the affinity. */
396 void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr);
397 /** Returns true when iterator is at the end of the CPUs and evlist. */
evlist_cpu_iterator__end(const struct evlist_cpu_iterator * evlist_cpu_itr)398 static inline bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
399 {
400 return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
401 }
402
403 struct evsel *evlist__get_tracking_event(struct evlist *evlist);
404 void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel);
405 struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide);
406
407 struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str);
408
409 struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event);
410
411 bool evlist__exclude_kernel(struct evlist *evlist);
412
413 void evlist__force_leader(struct evlist *evlist);
414
415 struct evsel *evlist__reset_weak_group(struct evlist *evlist, struct evsel *evsel, bool close);
416
417 #define EVLIST_CTL_CMD_ENABLE_TAG "enable"
418 #define EVLIST_CTL_CMD_DISABLE_TAG "disable"
419 #define EVLIST_CTL_CMD_ACK_TAG "ack\n"
420 #define EVLIST_CTL_CMD_SNAPSHOT_TAG "snapshot"
421 #define EVLIST_CTL_CMD_EVLIST_TAG "evlist"
422 #define EVLIST_CTL_CMD_STOP_TAG "stop"
423 #define EVLIST_CTL_CMD_PING_TAG "ping"
424
425 #define EVLIST_CTL_CMD_MAX_LEN 64
426
/*
 * Parsed control commands read from ctl_fd.fd; each value corresponds to one
 * of the EVLIST_CTL_CMD_*_TAG strings above. See evlist__ctlfd_process().
 */
enum evlist_ctl_cmd {
	EVLIST_CTL_CMD_UNSUPPORTED = 0,	/* unrecognized command */
	EVLIST_CTL_CMD_ENABLE,		/* EVLIST_CTL_CMD_ENABLE_TAG */
	EVLIST_CTL_CMD_DISABLE,		/* EVLIST_CTL_CMD_DISABLE_TAG */
	EVLIST_CTL_CMD_ACK,		/* EVLIST_CTL_CMD_ACK_TAG */
	EVLIST_CTL_CMD_SNAPSHOT,	/* EVLIST_CTL_CMD_SNAPSHOT_TAG */
	EVLIST_CTL_CMD_EVLIST,		/* EVLIST_CTL_CMD_EVLIST_TAG */
	EVLIST_CTL_CMD_STOP,		/* EVLIST_CTL_CMD_STOP_TAG */
	EVLIST_CTL_CMD_PING,		/* EVLIST_CTL_CMD_PING_TAG */
};
437
438 int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close);
439 void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close);
440 int evlist__initialize_ctlfd(struct evlist *evlist, int ctl_fd, int ctl_fd_ack);
441 int evlist__finalize_ctlfd(struct evlist *evlist);
442 bool evlist__ctlfd_initialized(struct evlist *evlist);
443 int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd);
444 int evlist__ctlfd_ack(struct evlist *evlist);
445
446 #define EVLIST_ENABLED_MSG "Events enabled\n"
447 #define EVLIST_DISABLED_MSG "Events disabled\n"
448
449 int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *opts,
450 const char *str, int unset);
451 int event_enable_timer__start(struct event_enable_timer *eet);
452 void event_enable_timer__exit(struct event_enable_timer **ep);
453 int event_enable_timer__process(struct event_enable_timer *eet);
454
455 struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
456
457 void evlist__format_evsels(struct evlist *evlist, struct strbuf *sb, size_t max_length);
458 void evlist__check_mem_load_aux(struct evlist *evlist);
459 void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list);
460 void evlist__uniquify_evsel_names(struct evlist *evlist, const struct perf_stat_config *config);
461 bool evlist__has_bpf_output(struct evlist *evlist);
462 bool evlist__needs_bpf_sb_event(struct evlist *evlist);
463
464 #endif /* __PERF_EVLIST_H */
465