// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/param.h>
#include <unistd.h>

#include <api/fs/tracing_path.h>
#include <api/io.h>
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>

#include "build-id.h"
#include "debug.h"
#include "evsel.h"
#include "metricgroup.h"
#include "parse-events.h"
#include "pmu.h"
#include "pmus.h"
#include "print-events.h"
#include "probe-file.h"
#include "string2.h"
#include "strlist.h"
#include "tracepoint.h"
#include "pfm.h"
#include "thread_map.h"
#include "tool_pmu.h"
#include "util.h"

#define MAX_NAME_LEN 100

/** Strings corresponding to enum perf_type_id. */
static const char * const event_type_descriptors[] = {
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
	"Raw event descriptor",
	"Hardware breakpoint",
};

/*
 * Print the events from <debugfs_mount_point>/tracing/events
 */
void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unused, void *print_state __maybe_unused)
{
	char *events_path = get_tracing_file("events");
	int events_fd = open(events_path, O_PATH);
	struct dirent **sys_namelist = NULL;
	int sys_items;

	if (events_fd < 0) {
		pr_err("Error: failed to open tracing events directory\n");
		pr_err("%s: %s\n", events_path, strerror(errno));
		return;
	}
	put_tracing_file(events_path);

	sys_items = tracing_events__scandir_alphasort(&sys_namelist);

	for (int i = 0; i < sys_items; i++) {
		struct dirent *sys_dirent = sys_namelist[i];
		struct dirent **evt_namelist = NULL;
		int dir_fd;
		int evt_items;

		if (sys_dirent->d_type != DT_DIR ||
		    !strcmp(sys_dirent->d_name, ".") ||
		    !strcmp(sys_dirent->d_name, ".."))
			goto next_sys;

		dir_fd = openat(events_fd, sys_dirent->d_name, O_PATH);
		if (dir_fd < 0)
			goto next_sys;

		evt_items = scandirat(events_fd, sys_dirent->d_name, &evt_namelist, NULL, alphasort);
		for (int j = 0; j < evt_items; j++) {
			/*
			 * Buffer sized at twice the max filename length + 1
			 * separator + 1 \0 terminator.
			 */
			char buf[NAME_MAX * 2 + 2];
			/* 16 possible hex digits and 22 other characters and \0. */
			char encoding[16 + 22];
			struct dirent *evt_dirent = evt_namelist[j];
			struct io id;
			__u64 config;

			if (evt_dirent->d_type != DT_DIR ||
			    !strcmp(evt_dirent->d_name, ".") ||
			    !strcmp(evt_dirent->d_name, ".."))
				goto next_evt;

			snprintf(buf, sizeof(buf), "%s/id", evt_dirent->d_name);
			io__init(&id, openat(dir_fd, buf, O_RDONLY), buf, sizeof(buf));

			if (id.fd < 0)
				goto next_evt;

			if (io__get_dec(&id, &config) < 0) {
				close(id.fd);
				goto next_evt;
			}
			close(id.fd);

			snprintf(buf, sizeof(buf), "%s:%s",
				 sys_dirent->d_name, evt_dirent->d_name);
			snprintf(encoding, sizeof(encoding), "tracepoint/config=0x%llx/", config);
			print_cb->print_event(print_state,
					/*topic=*/NULL,
					/*pmu_name=*/NULL, /* really "tracepoint" */
					/*event_name=*/buf,
					/*event_alias=*/NULL,
					/*scale_unit=*/NULL,
					/*deprecated=*/false,
					"Tracepoint event",
					/*desc=*/NULL,
					/*long_desc=*/NULL,
					encoding);
next_evt:
			free(evt_namelist[j]);
		}
		close(dir_fd);
		free(evt_namelist);
next_sys:
		free(sys_namelist[i]);
	}

	free(sys_namelist);
	close(events_fd);
}

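/*
 * Print SDT (Statically Defined Tracing) events recorded in the probe cache
 * for binaries in the build-id cache. Identically named probes from different
 * binaries are disambiguated by appending the originating binary path and a
 * 12-character build-id prefix.
 */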
void print_sdt_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct strlist *bidlist, *sdtlist;
	struct str_node *bid_nd, *sdt_name, *next_sdt_name;
	const char *last_sdt_name = NULL;

	/*
	 * The implicitly sorted sdtlist will hold the tracepoint name followed
	 * by @<buildid>. If the tracepoint name is unique (determined by
	 * looking at the adjacent nodes) the @<buildid> is dropped, otherwise
	 * the executable path and buildid are added to the name.
	 */
	sdtlist = strlist__new(NULL, NULL);
	if (!sdtlist) {
		pr_debug("Failed to allocate new strlist for SDT\n");
		return;
	}
	bidlist = build_id_cache__list_all(true);
	if (!bidlist) {
		pr_debug("Failed to get buildids: %d\n", errno);
		strlist__delete(sdtlist);
		return;
	}
	strlist__for_each_entry(bid_nd, bidlist) {
		struct probe_cache *pcache;
		struct probe_cache_entry *ent;

		pcache = probe_cache__new(bid_nd->s, NULL);
		if (!pcache)
			continue;
		list_for_each_entry(ent, &pcache->entries, node) {
			char buf[1024];

			snprintf(buf, sizeof(buf), "%s:%s@%s",
				 ent->pev.group, ent->pev.event, bid_nd->s);
			strlist__add(sdtlist, buf);
		}
		probe_cache__delete(pcache);
	}
	strlist__delete(bidlist);

	strlist__for_each_entry(sdt_name, sdtlist) {
		bool show_detail = false;
		char *bid = strchr(sdt_name->s, '@');
		char *evt_name = NULL;

		if (bid)
			*(bid++) = '\0';

		if (last_sdt_name && !strcmp(last_sdt_name, sdt_name->s)) {
			show_detail = true;
		} else {
			next_sdt_name = strlist__next(sdt_name);
			if (next_sdt_name) {
				char *bid2 = strchr(next_sdt_name->s, '@');

				if (bid2)
					*bid2 = '\0';
				if (strcmp(sdt_name->s, next_sdt_name->s) == 0)
					show_detail = true;
				if (bid2)
					*bid2 = '@';
			}
		}
		last_sdt_name = sdt_name->s;

		if (show_detail) {
			char *path = build_id_cache__origname(bid);

			if (path) {
				if (asprintf(&evt_name, "%s@%s(%.12s)", sdt_name->s, path, bid) < 0)
					evt_name = NULL;
				free(path);
			}
		}
		print_cb->print_event(print_state,
				/*topic=*/NULL,
				/*pmu_name=*/NULL,
				evt_name ?: sdt_name->s,
				/*event_alias=*/NULL,
				/*scale_unit=*/NULL,
				/*deprecated=*/false,
				"SDT event",
				/*desc=*/NULL,
				/*long_desc=*/NULL,
				/*encoding_desc=*/NULL);

		free(evt_name);
	}
	strlist__delete(sdtlist);
}

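/*
 * Check whether the kernel/PMU accepts an event of the given type and config
 * by attempting to open it, retrying with exclude_kernel and then
 * exclude_guest set for configurations that reject the unrestricted attr.
 */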
bool is_event_supported(u8 type, u64 config)
{
	bool ret = true;
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = type,
		.config = config,
		.disabled = 1,
	};
	struct perf_thread_map *tmap = thread_map__new_by_tid(0);

	if (tmap == NULL)
		return false;

	evsel = evsel__new(&attr);
	if (evsel) {
		ret = evsel__open(evsel, NULL, tmap) >= 0;

		if (!ret) {
			/*
			 * The event may fail to open if the paranoid value
			 * /proc/sys/kernel/perf_event_paranoid is set to 2.
			 * Re-run with exclude_kernel set; we don't do that by
			 * default as some ARM machines do not support it.
			 */
			evsel->core.attr.exclude_kernel = 1;
			ret = evsel__open(evsel, NULL, tmap) >= 0;
		}

		if (!ret) {
			/*
			 * The event may fail to open if the PMU requires
			 * exclude_guest to be set (e.g. as the Apple M1 PMU
			 * requires).
			 * Re-run with exclude_guest set; we don't do that by
			 * default as it's equally legitimate for another PMU
			 * driver to require that exclude_guest is clear.
			 */
			evsel->core.attr.exclude_guest = 1;
			ret = evsel__open(evsel, NULL, tmap) >= 0;
		}

		evsel__delete(evsel);
	}

	perf_thread_map__put(tmap);
	return ret;
}

int print_hwcache_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu = NULL;
	const char *event_type_descriptor = event_type_descriptors[PERF_TYPE_HW_CACHE];

	/*
	 * Only print core PMUs, skipping uncore (for performance) and
	 * PERF_TYPE_SOFTWARE, which can succeed in opening legacy cache
	 * events.
	 */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (pmu->is_uncore || pmu->type == PERF_TYPE_SOFTWARE)
			continue;

		for (int type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
			for (int op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
				/* skip invalid cache type */
				if (!evsel__is_cache_op_valid(type, op))
					continue;

				for (int res = 0; res < PERF_COUNT_HW_CACHE_RESULT_MAX; res++) {
					char name[64];
					char alias_name[128];
					__u64 config;
					int ret;

					__evsel__hw_cache_type_op_res_name(type, op, res,
									   name, sizeof(name));

					ret = parse_events__decode_legacy_cache(name, pmu->type,
										&config);
					if (ret || !is_event_supported(PERF_TYPE_HW_CACHE, config))
						continue;
					snprintf(alias_name, sizeof(alias_name), "%s/%s/",
						 pmu->name, name);
					print_cb->print_event(print_state,
							"cache",
							pmu->name,
							name,
							alias_name,
							/*scale_unit=*/NULL,
							/*deprecated=*/false,
							event_type_descriptor,
							/*desc=*/NULL,
							/*long_desc=*/NULL,
							/*encoding_desc=*/NULL);
				}
			}
		}
	}
	return 0;
}

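/*
 * Print the symbolic events from an event_symbol table (hardware or software),
 * pairing each symbol with its alias (if any) and skipping events the running
 * kernel does not support.
 */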
void print_symbol_events(const struct print_callbacks *print_cb, void *print_state,
			 unsigned int type, const struct event_symbol *syms,
			 unsigned int max)
{
	struct strlist *evt_name_list = strlist__new(NULL, NULL);
	struct str_node *nd;

	if (!evt_name_list) {
		pr_debug("Failed to allocate new strlist for symbol events\n");
		return;
	}
	for (unsigned int i = 0; i < max; i++) {
		/*
		 * New attr.config still not supported here; the latest
		 * example was PERF_COUNT_SW_CGROUP_SWITCHES.
		 */
		if (syms[i].symbol == NULL)
			continue;

		if (!is_event_supported(type, i))
			continue;

		if (strlen(syms[i].alias)) {
			char name[MAX_NAME_LEN];

			snprintf(name, MAX_NAME_LEN, "%s OR %s", syms[i].symbol, syms[i].alias);
			strlist__add(evt_name_list, name);
		} else
			strlist__add(evt_name_list, syms[i].symbol);
	}

	strlist__for_each_entry(nd, evt_name_list) {
		char *alias = strstr(nd->s, " OR ");

		if (alias) {
			*alias = '\0';
			alias += 4;
		}
		print_cb->print_event(print_state,
				/*topic=*/NULL,
				/*pmu_name=*/NULL,
				nd->s,
				alias,
				/*scale_unit=*/NULL,
				/*deprecated=*/false,
				event_type_descriptors[type],
				/*desc=*/NULL,
				/*long_desc=*/NULL,
				/*encoding_desc=*/NULL);
	}
	strlist__delete(evt_name_list);
}

/*
 * Print the help text for all event types:
 */
void print_events(const struct print_callbacks *print_cb, void *print_state)
{
	print_symbol_events(print_cb, print_state, PERF_TYPE_HARDWARE,
			    event_symbols_hw, PERF_COUNT_HW_MAX);
	print_symbol_events(print_cb, print_state, PERF_TYPE_SOFTWARE,
			    event_symbols_sw, PERF_COUNT_SW_MAX);

	print_hwcache_events(print_cb, print_state);

	perf_pmus__print_pmu_events(print_cb, print_state);

	print_cb->print_event(print_state,
			/*topic=*/NULL,
			/*pmu_name=*/NULL,
			"rNNN",
			/*event_alias=*/NULL,
			/*scale_unit=*/NULL,
			/*deprecated=*/false,
			event_type_descriptors[PERF_TYPE_RAW],
			/*desc=*/NULL,
			/*long_desc=*/NULL,
			/*encoding_desc=*/NULL);

	perf_pmus__print_raw_pmu_events(print_cb, print_state);

	print_cb->print_event(print_state,
			/*topic=*/NULL,
			/*pmu_name=*/NULL,
			"mem:<addr>[/len][:access]",
			/*event_alias=*/NULL,
			/*scale_unit=*/NULL,
			/*deprecated=*/false,
			event_type_descriptors[PERF_TYPE_BREAKPOINT],
			/*desc=*/NULL,
			/*long_desc=*/NULL,
			/*encoding_desc=*/NULL);

	print_tracepoint_events(print_cb, print_state);

	print_sdt_events(print_cb, print_state);

	metricgroup__print(print_cb, print_state);

	print_libpfm_events(print_cb, print_state);
}
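
/*
 * Usage sketch (illustrative only, not taken from this file): a caller such
 * as "perf list" fills in a struct print_callbacks and passes it together
 * with an opaque state pointer, along the lines of:
 *
 *	struct print_callbacks cb = {
 *		.print_event = my_print_event,	(hypothetical callback)
 *	};
 *	print_events(&cb, my_state);		(my_state is caller-defined)
 *
 * Within this file only the print_event callback is invoked directly; see
 * print-events.h for the complete callback structure.
 */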