// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/param.h>
#include <unistd.h>

#include <api/fs/tracing_path.h>
#include <api/io.h>
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>

#include "build-id.h"
#include "debug.h"
#include "evsel.h"
#include "metricgroup.h"
#include "parse-events.h"
#include "pmu.h"
#include "pmus.h"
#include "print-events.h"
#include "probe-file.h"
#include "string2.h"
#include "strlist.h"
#include "tracepoint.h"
#include "pfm.h"
#include "thread_map.h"
#include "tool_pmu.h"
#include "util.h"

#define MAX_NAME_LEN 100

/** Strings corresponding to enum perf_type_id. */
static const char * const event_type_descriptors[] = {
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
	"Raw event descriptor",
	"Hardware breakpoint",
};

/*
 * Print the events from <debugfs_mount_point>/tracing/events
 */
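/*
 * Each tracepoint is reported as "<sys>:<event>" together with an encoding of
 * the form "tracepoint/config=0x<id>/", where <id> is read from the event's
 * "id" file under the tracing events directory.
 */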
void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unused,
			     void *print_state __maybe_unused)
{
	char *events_path = get_tracing_file("events");
	int events_fd = open(events_path, O_PATH);
	struct dirent **sys_namelist = NULL;
	int sys_items;

	if (events_fd < 0) {
		pr_err("Error: failed to open tracing events directory\n");
		pr_err("%s: %s\n", events_path, strerror(errno));
		return;
	}
	put_tracing_file(events_path);

	sys_items = tracing_events__scandir_alphasort(&sys_namelist);

	for (int i = 0; i < sys_items; i++) {
		struct dirent *sys_dirent = sys_namelist[i];
		struct dirent **evt_namelist = NULL;
		int dir_fd;
		int evt_items;

		if (sys_dirent->d_type != DT_DIR ||
		    !strcmp(sys_dirent->d_name, ".") ||
		    !strcmp(sys_dirent->d_name, ".."))
			goto next_sys;

		dir_fd = openat(events_fd, sys_dirent->d_name, O_PATH);
		if (dir_fd < 0)
			goto next_sys;

		evt_items = scandirat(events_fd, sys_dirent->d_name, &evt_namelist, NULL, alphasort);
		for (int j = 0; j < evt_items; j++) {
			/*
			 * Buffer sized at twice the max filename length + 1
			 * separator + 1 \0 terminator.
			 */
			char buf[NAME_MAX * 2 + 2];
			/* 16 possible hex digits and 22 other characters and \0. */
			char encoding[16 + 22];
			struct dirent *evt_dirent = evt_namelist[j];
			struct io id;
			__u64 config;

			if (evt_dirent->d_type != DT_DIR ||
			    !strcmp(evt_dirent->d_name, ".") ||
			    !strcmp(evt_dirent->d_name, ".."))
				goto next_evt;

			snprintf(buf, sizeof(buf), "%s/id", evt_dirent->d_name);
			io__init(&id, openat(dir_fd, buf, O_RDONLY), buf, sizeof(buf));

			if (id.fd < 0)
				goto next_evt;

			if (io__get_dec(&id, &config) < 0) {
				close(id.fd);
				goto next_evt;
			}
			close(id.fd);

			snprintf(buf, sizeof(buf), "%s:%s",
				 sys_dirent->d_name, evt_dirent->d_name);
			snprintf(encoding, sizeof(encoding), "tracepoint/config=0x%llx/", config);
			print_cb->print_event(print_state,
					/*topic=*/NULL,
					/*pmu_name=*/NULL, /* really "tracepoint" */
					/*event_name=*/buf,
					/*event_alias=*/NULL,
					/*scale_unit=*/NULL,
					/*deprecated=*/false,
					"Tracepoint event",
					/*desc=*/NULL,
					/*long_desc=*/NULL,
					encoding);
next_evt:
			free(evt_namelist[j]);
		}
		close(dir_fd);
		free(evt_namelist);
next_sys:
		free(sys_namelist[i]);
	}

	free(sys_namelist);
	close(events_fd);
}

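/*
 * Print SDT (statically defined tracing) events discovered via the probe
 * cache for every build-id in the build-id cache.
 */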
void print_sdt_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct strlist *bidlist, *sdtlist;
	struct str_node *bid_nd, *sdt_name, *next_sdt_name;
	const char *last_sdt_name = NULL;

	/*
	 * The implicitly sorted sdtlist will hold the tracepoint name followed
	 * by @<buildid>. If the tracepoint name is unique (determined by
	 * looking at the adjacent nodes), the @<buildid> is dropped; otherwise
	 * the executable path and buildid are added to the name.
	 */
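	/*
	 * For example (names illustrative), a probe provided by two cached
	 * binaries is listed with detail, "grp:evt@/path/to/bin(abcdef123456)",
	 * while a probe found in only one binary is listed simply as "grp:evt".
	 */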
	sdtlist = strlist__new(NULL, NULL);
	if (!sdtlist) {
		pr_debug("Failed to allocate new strlist for SDT\n");
		return;
	}
	bidlist = build_id_cache__list_all(true);
	if (!bidlist) {
		pr_debug("Failed to get buildids: %d\n", errno);
		strlist__delete(sdtlist);
		return;
	}
	strlist__for_each_entry(bid_nd, bidlist) {
		struct probe_cache *pcache;
		struct probe_cache_entry *ent;

		pcache = probe_cache__new(bid_nd->s, NULL);
		if (!pcache)
			continue;
		list_for_each_entry(ent, &pcache->entries, node) {
			char buf[1024];

			snprintf(buf, sizeof(buf), "%s:%s@%s",
				 ent->pev.group, ent->pev.event, bid_nd->s);
			strlist__add(sdtlist, buf);
		}
		probe_cache__delete(pcache);
	}
	strlist__delete(bidlist);

	strlist__for_each_entry(sdt_name, sdtlist) {
		bool show_detail = false;
		char *bid = strchr(sdt_name->s, '@');
		char *evt_name = NULL;

		if (bid)
			*(bid++) = '\0';

		if (last_sdt_name && !strcmp(last_sdt_name, sdt_name->s)) {
			show_detail = true;
		} else {
			next_sdt_name = strlist__next(sdt_name);
			if (next_sdt_name) {
				char *bid2 = strchr(next_sdt_name->s, '@');

				if (bid2)
					*bid2 = '\0';
				if (strcmp(sdt_name->s, next_sdt_name->s) == 0)
					show_detail = true;
				if (bid2)
					*bid2 = '@';
			}
		}
		last_sdt_name = sdt_name->s;

		if (show_detail) {
			char *path = build_id_cache__origname(bid);

			if (path) {
				if (asprintf(&evt_name, "%s@%s(%.12s)", sdt_name->s, path, bid) < 0)
					evt_name = NULL;
				free(path);
			}
		}
		print_cb->print_event(print_state,
				/*topic=*/NULL,
				/*pmu_name=*/NULL,
				evt_name ?: sdt_name->s,
				/*event_alias=*/NULL,
				/*scale_unit=*/NULL,
				/*deprecated=*/false,
219 "SDT event",
220 /*desc=*/NULL,
221 /*long_desc=*/NULL,
222 /*encoding_desc=*/NULL);
223
224 free(evt_name);
225 }
226 strlist__delete(sdtlist);
227 }
228
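/*
 * Probe whether the kernel/PMU accepts a given event by actually trying to
 * open it on the current thread, retrying with exclude_kernel and then
 * exclude_guest set when the first attempt fails. For example, the legacy
 * cycles event can be checked with
 * is_event_supported(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES).
 */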
bool is_event_supported(u8 type, u64 config)
{
	bool ret = true;
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = type,
		.config = config,
		.disabled = 1,
	};
	struct perf_thread_map *tmap = thread_map__new_by_tid(0);

	if (tmap == NULL)
		return false;

	evsel = evsel__new(&attr);
	if (evsel) {
		ret = evsel__open(evsel, NULL, tmap) >= 0;

		if (!ret) {
			/*
			 * The event may fail to open if the paranoid value
			 * /proc/sys/kernel/perf_event_paranoid is set to 2.
			 * Re-run with exclude_kernel set; we don't do that by
			 * default as some ARM machines do not support it.
			 */
			evsel->core.attr.exclude_kernel = 1;
			ret = evsel__open(evsel, NULL, tmap) >= 0;
		}

		if (!ret) {
			/*
			 * The event may fail to open if the PMU requires
			 * exclude_guest to be set (e.g. as the Apple M1 PMU
			 * requires).
			 * Re-run with exclude_guest set; we don't do that by
			 * default as it's equally legitimate for another PMU
			 * driver to require that exclude_guest is clear.
			 */
			evsel->core.attr.exclude_guest = 1;
			ret = evsel__open(evsel, NULL, tmap) >= 0;
		}

		evsel__close(evsel);
		evsel__delete(evsel);
	}

	perf_thread_map__put(tmap);
	return ret;
}

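/*
 * Print the legacy hardware cache events (e.g. "L1-dcache-load-misses"; the
 * name is illustrative and comes from __evsel__hw_cache_type_op_res_name())
 * for each core PMU, together with a "<pmu>/<event>/" alias.
 */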
int print_hwcache_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu = NULL;
	const char *event_type_descriptor = event_type_descriptors[PERF_TYPE_HW_CACHE];

	/*
	 * Only print core PMUs, skipping uncore PMUs for performance and
	 * PERF_TYPE_SOFTWARE, which can succeed in opening legacy cache
	 * events.
	 */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (pmu->is_uncore || pmu->type == PERF_TYPE_SOFTWARE)
			continue;

		for (int type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
			for (int op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
				/* skip invalid cache type */
				if (!evsel__is_cache_op_valid(type, op))
					continue;

				for (int res = 0; res < PERF_COUNT_HW_CACHE_RESULT_MAX; res++) {
					char name[64];
					char alias_name[128];
					__u64 config;
					int ret;

					__evsel__hw_cache_type_op_res_name(type, op, res,
									   name, sizeof(name));

					ret = parse_events__decode_legacy_cache(name, pmu->type,
										&config);
					if (ret || !is_event_supported(PERF_TYPE_HW_CACHE, config))
						continue;
					snprintf(alias_name, sizeof(alias_name), "%s/%s/",
						 pmu->name, name);
					print_cb->print_event(print_state,
							"cache",
							pmu->name,
							name,
							alias_name,
							/*scale_unit=*/NULL,
							/*deprecated=*/false,
							event_type_descriptor,
							/*desc=*/NULL,
							/*long_desc=*/NULL,
							/*encoding_desc=*/NULL);
				}
			}
		}
	}
	return 0;
}

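/*
 * Print the legacy symbolic events of @type from the @syms table (@max
 * entries), e.g. print_symbol_events(print_cb, print_state, PERF_TYPE_HARDWARE,
 * event_symbols_hw, PERF_COUNT_HW_MAX) as done from print_events() below.
 * Symbols with an alias are passed to the callback with both name and alias.
 */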
void print_symbol_events(const struct print_callbacks *print_cb, void *print_state,
			 unsigned int type, const struct event_symbol *syms,
			 unsigned int max)
{
	struct strlist *evt_name_list = strlist__new(NULL, NULL);
	struct str_node *nd;

	if (!evt_name_list) {
		pr_debug("Failed to allocate new strlist for symbol events\n");
		return;
	}
	for (unsigned int i = 0; i < max; i++) {
		/*
		 * New attr.config still not supported here; the latest
		 * example was PERF_COUNT_SW_CGROUP_SWITCHES.
		 */
		if (syms[i].symbol == NULL)
			continue;

		if (!is_event_supported(type, i))
			continue;

		if (strlen(syms[i].alias)) {
			char name[MAX_NAME_LEN];

			snprintf(name, MAX_NAME_LEN, "%s OR %s", syms[i].symbol, syms[i].alias);
			strlist__add(evt_name_list, name);
		} else
			strlist__add(evt_name_list, syms[i].symbol);
	}

	strlist__for_each_entry(nd, evt_name_list) {
		char *alias = strstr(nd->s, " OR ");

		if (alias) {
			*alias = '\0';
			alias += 4;
		}
		print_cb->print_event(print_state,
				/*topic=*/NULL,
				/*pmu_name=*/NULL,
				nd->s,
				alias,
				/*scale_unit=*/NULL,
				/*deprecated=*/false,
				event_type_descriptors[type],
				/*desc=*/NULL,
				/*long_desc=*/NULL,
				/*encoding_desc=*/NULL);
	}
	strlist__delete(evt_name_list);
}

/*
 * Print the help text for all event types:
 */
void print_events(const struct print_callbacks *print_cb, void *print_state)
{
	print_symbol_events(print_cb, print_state, PERF_TYPE_HARDWARE,
			    event_symbols_hw, PERF_COUNT_HW_MAX);
	print_symbol_events(print_cb, print_state, PERF_TYPE_SOFTWARE,
			    event_symbols_sw, PERF_COUNT_SW_MAX);

	print_hwcache_events(print_cb, print_state);

	perf_pmus__print_pmu_events(print_cb, print_state);

	print_cb->print_event(print_state,
			/*topic=*/NULL,
			/*pmu_name=*/NULL,
			"rNNN",
			/*event_alias=*/NULL,
			/*scale_unit=*/NULL,
			/*deprecated=*/false,
			event_type_descriptors[PERF_TYPE_RAW],
			/*desc=*/NULL,
			/*long_desc=*/NULL,
			/*encoding_desc=*/NULL);

	perf_pmus__print_raw_pmu_events(print_cb, print_state);

	print_cb->print_event(print_state,
			/*topic=*/NULL,
			/*pmu_name=*/NULL,
			"mem:<addr>[/len][:access]",
			/*event_alias=*/NULL,
			/*scale_unit=*/NULL,
			/*deprecated=*/false,
			event_type_descriptors[PERF_TYPE_BREAKPOINT],
			/*desc=*/NULL,
			/*long_desc=*/NULL,
			/*encoding_desc=*/NULL);

	print_tracepoint_events(print_cb, print_state);

	print_sdt_events(print_cb, print_state);

	metricgroup__print(print_cb, print_state);

	print_libpfm_events(print_cb, print_state);
}