// SPDX-License-Identifier: GPL-2.0-only

#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/mmap.h"
#include "util/perf_api_probe.h"
#include <perf/mmap.h>
#include <linux/perf_event.h>
#include <limits.h>
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

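/*
 * Side band events are consumed in-process: a dedicated reader thread
 * delivers them to per-evsel callbacks as they arrive.  A rough usage
 * sketch (the dummy attribute, callback and variables below are
 * illustrative placeholders, not part of this file):
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.config		= PERF_COUNT_SW_DUMMY,
 *		.sample_id_all	= 1,
 *		.watermark	= 1,
 *		.size		= sizeof(attr),
 *	};
 *
 *	attr.wakeup_watermark = 1;
 *	evlist__add_sb_event(sb_evlist, &attr, my_sb_cb, my_data);
 *	evlist__start_sb_thread(sb_evlist, &target);
 *	...
 *	evlist__stop_sb_thread(sb_evlist);
 */

/*
 * Add an event with the given attribute to the evlist and attach the side
 * band callback cb, which the reader thread will invoke with data for every
 * event read from this evsel's ring buffer.  sample_id_all is forced on so
 * that events can be routed back to the right evsel.
 */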
int evlist__add_sb_event(struct evlist *evlist, struct perf_event_attr *attr,
			 evsel__sb_cb_t cb, void *data)
{
	struct evsel *evsel;

	if (!attr->sample_id_all) {
		pr_warning("enabling sample_id_all for all side band events\n");
		attr->sample_id_all = 1;
	}

	evsel = evsel__new_idx(attr, evlist->core.nr_entries);
	if (!evsel)
		return -1;

	evsel->side_band.cb = cb;
	evsel->side_band.data = data;
	evlist__add(evlist, evsel);
	return 0;
}

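/*
 * Reader thread: poll the evlist's ring buffers and dispatch each event to
 * the side band callback of the evsel it belongs to.  Once thread.done is
 * set by evlist__stop_sb_thread(), stop polling, drain what is left and
 * exit.
 */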
static void *perf_evlist__poll_thread(void *arg)
{
	struct evlist *evlist = arg;
	bool draining = false;
	int i, done = 0;
	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the fs_struct has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing when, for instance, reading the build-ids at
	 * the end of a 'perf record' session.
	 */
	unshare(CLONE_FS);

	while (!done) {
		bool got_data = false;

		if (evlist->thread.done)
			draining = true;

		if (!draining)
			evlist__poll(evlist, 1000);

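		/*
		 * Read whatever is in the ring buffers and hand each event
		 * to the side band callback of the evsel it belongs to.
		 */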
		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			struct mmap *map = &evlist->mmap[i];
			union perf_event *event;

			if (perf_mmap__read_init(&map->core))
				continue;
			while ((event = perf_mmap__read_event(&map->core)) != NULL) {
				struct evsel *evsel = evlist__event2evsel(evlist, event);

				if (evsel && evsel->side_band.cb)
					evsel->side_band.cb(event, evsel->side_band.data);
				else
					pr_warning("cannot locate proper evsel for the side band event\n");

				perf_mmap__consume(&map->core);
				got_data = true;
			}
			perf_mmap__read_done(&map->core);
		}

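		/*
		 * Once asked to stop, keep going until a full pass over the
		 * ring buffers finds no more events.
		 */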
		if (draining && !got_data)
			break;
	}
	return NULL;
}

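/*
 * Attach the same side band callback and data to every evsel in the evlist
 * and adjust their attributes (sample_id_all plus a minimal wakeup
 * watermark) so that the poll thread is woken up as soon as data arrives.
 */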
void evlist__set_cb(struct evlist *evlist, evsel__sb_cb_t cb, void *data)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel->core.attr.sample_id_all = 1;
		evsel->core.attr.watermark = 1;
		evsel->core.attr.wakeup_watermark = 1;
		evsel->side_band.cb = cb;
		evsel->side_band.data = data;
	}
}

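/*
 * Create the cpu/thread maps for the target, open, mmap and enable all
 * events in the evlist, then start the side band reader thread.  On any
 * failure the evlist is deleted and -1 is returned.
 */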
int evlist__start_sb_thread(struct evlist *evlist, struct target *target)
{
	struct evsel *counter;

	if (!evlist)
		return 0;

	if (evlist__create_maps(evlist, target))
		goto out_delete_evlist;

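	/*
	 * With more than one event, a sample identifier is needed so that
	 * events read from the ring buffers can be matched back to the
	 * evsel they came from.
	 */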
	if (evlist->core.nr_entries > 1) {
		bool can_sample_identifier = perf_can_sample_identifier();

		evlist__for_each_entry(evlist, counter)
			evsel__set_sample_id(counter, can_sample_identifier);

		evlist__set_id_pos(evlist);
	}

	evlist__for_each_entry(evlist, counter) {
		if (evsel__open(counter, evlist->core.user_requested_cpus,
				evlist->core.threads) < 0)
			goto out_delete_evlist;
	}

	if (evlist__mmap(evlist, UINT_MAX))
		goto out_delete_evlist;

	evlist__for_each_entry(evlist, counter) {
		if (evsel__enable(counter))
			goto out_delete_evlist;
	}

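	/* Everything is set up, start the reader thread. */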
	evlist->thread.done = 0;
	if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
		goto out_delete_evlist;

	return 0;

out_delete_evlist:
	evlist__delete(evlist);
	evlist = NULL;
	return -1;
}

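/*
 * Ask the reader thread to drain and exit, wait for it, then dispose of
 * the evlist.
 */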
void evlist__stop_sb_thread(struct evlist *evlist)
{
	if (!evlist)
		return;
	evlist->thread.done = 1;
	pthread_join(evlist->thread.th, NULL);
	evlist__delete(evlist);
}