// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <dirent.h>
#include <pthread.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
#include "pmu.h"
#include "print-events.h"

/*
 * core_pmus: A PMU belongs to core_pmus if its name is "cpu" or its sysfs
 *            directory contains a "cpus" file. All PMUs belonging to core_pmus
 *            must have pmu->is_core=1. If there is more than one PMU in
 *            this list, perf interprets it as a heterogeneous platform.
 *            (FWIW, certain ARM platforms with heterogeneous cores use a
 *            homogeneous PMU, and are thus treated as a homogeneous
 *            platform by perf because core_pmus will have only one entry.)
 * other_pmus: All other PMUs, i.e. those not in the core_pmus list. It doesn't
 *             matter whether a PMU is present per SMT-thread or outside of the
 *             core in the hw. For example, an instance of the AMD ibs_fetch//
 *             and ibs_op// PMUs is present in each hw SMT thread, yet they
 *             are captured under other_pmus. PMUs belonging to other_pmus
 *             must have pmu->is_core=0, but pmu->is_uncore can be 0 or 1.
 */
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;

void perf_pmus__destroy(void)
{
	struct perf_pmu *pmu, *tmp;

	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	read_sysfs_core_pmus = false;
	read_sysfs_all_pmus = false;
}

static struct perf_pmu *pmu_find(const char *name)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}
	list_for_each_entry(pmu, &other_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}

	return NULL;
}

struct perf_pmu *perf_pmus__find(const char *name)
{
	struct perf_pmu *pmu;
	int dirfd;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, so this avoids
	 * re-reading and re-parsing the PMU format definitions.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	dirfd = perf_pmu__event_source_devices_fd();
	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
	close(dirfd);

	return pmu;
}

static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
{
	struct perf_pmu *pmu;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, so this avoids
	 * re-reading and re-parsing the PMU format definitions.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
}
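/*
 * Example usage (a sketch, not code from this file): callers typically
 * resolve a PMU lazily by name and only then query it, e.g.:
 *
 *	struct perf_pmu *pmu = perf_pmus__find("cpu");
 *
 *	if (pmu && perf_pmu__have_event(pmu, "cycles"))
 *		...
 *
 * ("cpu" and "cycles" are illustrative values.) perf_pmus__have_event()
 * below wraps exactly this pattern.
 */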
/* Add all PMUs in sysfs to the PMU list: */
static void pmu_read_sysfs(bool core_only)
{
	int fd;
	DIR *dir;
	struct dirent *dent;

	if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
		return;

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return;

	dir = fdopendir(fd);
	if (!dir) {
		/* fdopendir() doesn't take ownership of fd on failure. */
		close(fd);
		return;
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (core_only && !is_pmu_core(dent->d_name))
			continue;
		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
		perf_pmu__find2(fd, dent->d_name);
	}

	closedir(dir);
	if (core_only) {
		read_sysfs_core_pmus = true;
	} else {
		read_sysfs_core_pmus = true;
		read_sysfs_all_pmus = true;
	}
}

static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}

	list_for_each_entry(pmu, &other_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);

	if (pmu || read_sysfs_all_pmus)
		return pmu;

	pmu_read_sysfs(/*core_only=*/false);
	pmu = __perf_pmus__find_by_type(type);
	return pmu;
}

/*
 * PMU iterator: if pmu is NULL, start at the beginning, otherwise return the
 * next PMU. Returns NULL when the end is reached.
 */
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}

struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/true);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	list_for_each_entry_continue(pmu, &core_pmus, list)
		return pmu;

	return NULL;
}
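/*
 * Typical iterator usage (a sketch; perf_pmus__pmu_for_pmu_filter() below and
 * several other functions in this file follow the same pattern):
 *
 *	struct perf_pmu *pmu = NULL;
 *
 *	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
 *		... core PMUs are returned first, then the others ...
 *	}
 *
 * The first call with pmu == NULL triggers a lazy sysfs read. Use
 * perf_pmus__scan_core() instead to visit only the core PMUs.
 */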
const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!strcmp(pmu->name, str))
			return pmu;
		/* Ignore "uncore_" prefix. */
		if (!strncmp(pmu->name, "uncore_", 7)) {
			if (!strcmp(pmu->name + 7, str))
				return pmu;
		}
		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
		if (!strncmp(pmu->name, "cpu_", 4)) {
			if (!strcmp(pmu->name + 4, str))
				return pmu;
		}
	}
	return NULL;
}

int __weak perf_pmus__num_mem_pmus(void)
{
	/* All core PMUs are for mem events. */
	return perf_pmus__num_core_pmus();
}

/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	/**
	 * Optional event for name, desc, etc. If not present then this is a
	 * selectable PMU and the event name is shown as "//".
	 */
	const struct perf_pmu_alias *event;
	/** Is the PMU for the CPU? */
	bool is_cpu;
};

static int cmp_sevent(const void *a, const void *b)
{
	const struct sevent *as = a;
	const struct sevent *bs = b;
	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
	const char *a_name = "//", *a_desc = NULL, *a_topic = "";
	const char *b_name = "//", *b_desc = NULL, *b_topic = "";
	int ret;

	if (as->event) {
		a_name = as->event->name;
		a_desc = as->event->desc;
		a_topic = as->event->topic ?: "";
		a_pmu_name = as->event->pmu_name;
	}
	if (bs->event) {
		b_name = bs->event->name;
		b_desc = bs->event->desc;
		b_topic = bs->event->topic ?: "";
		b_pmu_name = bs->event->pmu_name;
	}
	/* Put extra events last. */
	if (!!a_desc != !!b_desc)
		return !!a_desc - !!b_desc;

	/* Order by topics. */
	ret = strcmp(a_topic, b_topic);
	if (ret)
		return ret;

	/* Order CPU core events to be first. */
	if (as->is_cpu != bs->is_cpu)
		return as->is_cpu ? -1 : 1;

	/* Order by PMU name. */
	if (as->pmu != bs->pmu) {
		a_pmu_name = a_pmu_name ?: (as->pmu->name ?: "");
		b_pmu_name = b_pmu_name ?: (bs->pmu->name ?: "");
		ret = strcmp(a_pmu_name, b_pmu_name);
		if (ret)
			return ret;
	}

	/* Order by event name. */
	return strcmp(a_name, b_name);
}

static bool pmu_alias_is_duplicate(struct sevent *alias_a,
				   struct sevent *alias_b)
{
	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
	const char *a_name = "//", *b_name = "//";

	if (alias_a->event) {
		a_name = alias_a->event->name;
		a_pmu_name = alias_a->event->pmu_name;
	}
	if (alias_b->event) {
		b_name = alias_b->event->name;
		b_pmu_name = alias_b->event->pmu_name;
	}

	/* Different names -> never duplicates. */
	if (strcmp(a_name, b_name))
		return false;

	/* Don't remove duplicates for different PMUs. */
	a_pmu_name = a_pmu_name ?: (alias_a->pmu->name ?: "");
	b_pmu_name = b_pmu_name ?: (alias_b->pmu->name ?: "");
	return strcmp(a_pmu_name, b_pmu_name) == 0;
}

static int sub_non_neg(int a, int b)
{
	if (b > a)
		return 0;
	return a - b;
}

static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
			  const struct perf_pmu_alias *alias)
{
	struct parse_events_term *term;
	int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);

	list_for_each_entry(term, &alias->terms, list) {
		if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
			used += snprintf(buf + used, sub_non_neg(len, used),
					",%s=%s", term->config,
					term->val.str);
	}

	if (sub_non_neg(len, used) > 0) {
		buf[used] = '/';
		used++;
	}
	if (sub_non_neg(len, used) > 0) {
		buf[used] = '\0';
		used++;
	} else
		buf[len - 1] = '\0';

	return buf;
}
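/*
 * An illustrative (made-up) example: for a PMU "mypmu" and an alias "myevent"
 * whose only string-valued term is "filter=abc", format_alias() fills buf
 * with:
 *
 *	mypmu/myevent,filter=abc/
 *
 * i.e. an event specification the event parser can consume again. Note that
 * only PARSE_EVENTS__TERM_TYPE_STR terms are appended; numeric terms are
 * omitted.
 */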
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	struct perf_pmu_alias *event;
	char buf[1024];
	int printed = 0;
	int len, j;
	struct sevent *aliases;

	pmu = NULL;
	len = 0;
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		list_for_each_entry(event, &pmu->aliases, list)
			len++;
		if (pmu->selectable)
			len++;
	}
	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	pmu = NULL;
	j = 0;
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		bool is_cpu = pmu->is_core;

		list_for_each_entry(event, &pmu->aliases, list) {
			aliases[j].event = event;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;
			j++;
		}
		if (pmu->selectable) {
			aliases[j].event = NULL;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;
			j++;
		}
	}
	len = j;
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (j = 0; j < len; j++) {
		const char *name, *alias = NULL, *scale_unit = NULL,
			*desc = NULL, *long_desc = NULL,
			*encoding_desc = NULL, *topic = NULL,
			*pmu_name = NULL;
		bool deprecated = false;
		size_t buf_used;

		/* Skip duplicates. */
		if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
			continue;

		if (!aliases[j].event) {
			/* A selectable event. */
			pmu_name = aliases[j].pmu->name;
			buf_used = snprintf(buf, sizeof(buf), "%s//", pmu_name) + 1;
			name = buf;
		} else {
			if (aliases[j].event->desc) {
				name = aliases[j].event->name;
				buf_used = 0;
			} else {
				name = format_alias(buf, sizeof(buf), aliases[j].pmu,
						    aliases[j].event);
				if (aliases[j].is_cpu) {
					alias = name;
					name = aliases[j].event->name;
				}
				buf_used = strlen(buf) + 1;
			}
			pmu_name = aliases[j].event->pmu_name ?: (aliases[j].pmu->name ?: "");
			if (strlen(aliases[j].event->unit) || aliases[j].event->scale != 1.0) {
				scale_unit = buf + buf_used;
				buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
						"%G%s", aliases[j].event->scale,
						aliases[j].event->unit) + 1;
			}
			desc = aliases[j].event->desc;
			long_desc = aliases[j].event->long_desc;
			topic = aliases[j].event->topic;
			encoding_desc = buf + buf_used;
			buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
					"%s/%s/", pmu_name, aliases[j].event->str) + 1;
			deprecated = aliases[j].event->deprecated;
		}
		print_cb->print_event(print_state,
				pmu_name,
				topic,
				name,
				alias,
				scale_unit,
				deprecated,
				"Kernel PMU event",
				desc,
				long_desc,
				encoding_desc);
		printed++;
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}

bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	return pmu && perf_pmu__have_event(pmu, name);
}

int perf_pmus__num_core_pmus(void)
{
	static int count;

	if (!count) {
		struct perf_pmu *pmu = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
			count++;
	}
	return count;
}

static bool __perf_pmus__supports_extended_type(void)
{
	struct perf_pmu *pmu = NULL;

	if (perf_pmus__num_core_pmus() <= 1)
		return false;

	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (!is_event_supported(PERF_TYPE_HARDWARE,
					PERF_COUNT_HW_CPU_CYCLES |
					((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
			return false;
	}

	return true;
}

static bool perf_pmus__do_support_extended_type;

static void perf_pmus__init_supports_extended_type(void)
{
	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
}

bool perf_pmus__supports_extended_type(void)
{
	static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;

	pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);

	return perf_pmus__do_support_extended_type;
}
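/*
 * Extended types encode the PMU in the upper 32 bits of attr.config for
 * PERF_TYPE_HARDWARE events (PERF_PMU_TYPE_SHIFT is 32 in the uapi). A sketch
 * of the attribute the probe above effectively constructs for each core PMU:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES |
 *			  ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT),
 *	};
 *
 * On kernels without support, opening such an event fails, so a false return
 * from is_event_supported() for any core PMU disables the feature.
 */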
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel->pmu;

	if (!pmu) {
		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
		/* Cache the lookup on the evsel; cast away const to do so. */
		((struct evsel *)evsel)->pmu = pmu;
	}
	return pmu;
}