// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017, Intel Corporation.
 */

/* Manage metrics and groups of metrics from JSON files */

#include "metricgroup.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "strbuf.h"
#include "pmu.h"
#include "pmu-hybrid.h"
#include "print-events.h"
#include "smt.h"
#include "expr.h"
#include "rblist.h"
#include <string.h>
#include <errno.h>
#include "strlist.h"
#include <assert.h>
#include <linux/ctype.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include <subcmd/parse-options.h>
#include <api/fs/fs.h>
#include "util.h"
#include <asm/bug.h>
#include "cgroup.h"
#include "util/hashmap.h"

/*
 * Find the metric_event node keyed by @evsel in @metric_events. When @create
 * is true and no node exists, insert one and return it. Returns NULL when
 * @metric_events is NULL or the node can't be found/created.
 */
struct metric_event *metricgroup__lookup(struct rblist *metric_events,
					 struct evsel *evsel,
					 bool create)
{
	struct rb_node *nd;
	struct metric_event me = {
		.evsel = evsel
	};

	if (!metric_events)
		return NULL;

	nd = rblist__find(metric_events, &me);
	if (nd)
		return container_of(nd, struct metric_event, nd);
	if (create) {
		/*
		 * rblist__add_node copies 'me' via the node_new callback. Its
		 * return value is ignored here; a failed insert surfaces as a
		 * NULL result from the re-find below.
		 */
		rblist__add_node(metric_events, &me);
		nd = rblist__find(metric_events, &me);
		if (nd)
			return container_of(nd, struct metric_event, nd);
	}
	return NULL;
}

/* rblist comparator: order metric_event nodes by evsel pointer value. */
static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
{
	struct metric_event *a = container_of(rb_node,
					      struct metric_event,
					      nd);
	const struct metric_event *b = entry;

	if (a->evsel == b->evsel)
		return 0;
	/* Pointer order only; the casts just yield a comparable scalar type. */
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

/* rblist node_new callback: heap-copy the caller's stack-allocated key. */
static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(struct metric_event));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct metric_event));
	/* Redundant with the memcpy above, but harmless. */
	me->evsel = ((struct metric_event *)entry)->evsel;
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}

/* rblist node_delete callback: free a node and the metric_exprs it owns. */
static void metric_event_delete(struct rblist *rblist __maybe_unused,
				struct rb_node *rb_node)
{
	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
	struct metric_expr *expr, *tmp;

	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
		zfree(&expr->metric_name);
		zfree(&expr->metric_refs);
		zfree(&expr->metric_events);
		free(expr);
	}

	free(me);
}

/* Initialize an rblist with the metric_event callbacks above. */
static void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
	metric_events->node_delete = metric_event_delete;
}

void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}

/**
 * The metric under construction. The data held here will be placed in a
 * metric_expr.
 */
struct metric {
	struct list_head nd;
	/**
	 * The expression parse context importantly holding the IDs contained
	 * within the expression.
	 */
	struct expr_parse_ctx *pctx;
	/* PMU the metric belongs to; "cpu" when the JSON specifies none. */
	const char *pmu;
	/** The name of the metric such as "IPC". */
	const char *metric_name;
	/** Modifier on the metric such as "u" or NULL for none. */
	const char *modifier;
	/** The expression to parse, for example, "instructions/cycles". */
	const char *metric_expr;
	/** Optional threshold expression where zero value is green, otherwise red. */
	const char *metric_threshold;
	/**
	 * The "ScaleUnit" that scales and adds a unit to the metric during
	 * output.
	 */
	const char *metric_unit;
	/** Optional null terminated array of referenced metrics. */
	struct metric_ref *metric_refs;
	/**
	 * Should events of the metric be grouped?
	 */
	bool group_events;
	/**
	 * Parsed events for the metric.
	 * Optional as events may be taken from a
	 * different metric whose group contains all the IDs necessary for this
	 * one.
	 */
	struct evlist *evlist;
};

/*
 * Warn about the NMI watchdog constraint: foot=false warns per-metric that
 * grouping is skipped; foot=true prints the disable-the-watchdog how-to once
 * at the end, only if any metric was affected.
 */
static void metric__watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Not grouping metric %s's events.\n", name);
		violate_nmi_constraint = true;
		return;
	}

	if (!violate_nmi_constraint)
		return;

	pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
		   "    perf stat ...\n"
		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}

/* Decide whether a metric's events may be placed into one event group. */
static bool metric__group_events(const struct pmu_metric *pm)
{
	switch (pm->event_grouping) {
	case MetricNoGroupEvents:
		return false;
	case MetricNoGroupEventsNmi:
		/* Grouping is only possible while the NMI watchdog is off. */
		if (!sysctl__nmi_watchdog_enabled())
			return true;
		metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
		return false;
	case MetricNoGroupEventsSmt:
		return !smt_on();
	case MetricGroupEvents:
	default:
		return true;
	}
}

/* Release a metric and everything it owns. NULL is a no-op. */
static void metric__free(struct metric *m)
{
	if (!m)
		return;

	zfree(&m->metric_refs);
	expr__ctx_free(m->pctx);
	zfree(&m->modifier);
	evlist__delete(m->evlist);
	free(m);
}

/*
 * Allocate and initialize a metric from @pm. Strings taken from @pm are
 * borrowed, not copied; @modifier and @user_requested_cpu_list are
 * duplicated. Returns NULL on allocation failure.
 */
static struct metric *metric__new(const struct pmu_metric *pm,
				  const char *modifier,
				  bool metric_no_group,
				  int runtime,
				  const char *user_requested_cpu_list,
				  bool system_wide)
{
	struct metric *m;

	m = zalloc(sizeof(*m));
	if (!m)
		return NULL;

	m->pctx = expr__ctx_new();
	if (!m->pctx)
		goto out_err;

	m->pmu = pm->pmu ?: "cpu";
	m->metric_name = pm->metric_name;
	m->modifier = NULL;
	if (modifier) {
		m->modifier = strdup(modifier);
		if (!m->modifier)
			goto out_err;
	}
	m->metric_expr = pm->metric_expr;
m->metric_threshold = pm->metric_threshold; 230 m->metric_unit = pm->unit; 231 m->pctx->sctx.user_requested_cpu_list = NULL; 232 if (user_requested_cpu_list) { 233 m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list); 234 if (!m->pctx->sctx.user_requested_cpu_list) 235 goto out_err; 236 } 237 m->pctx->sctx.runtime = runtime; 238 m->pctx->sctx.system_wide = system_wide; 239 m->group_events = !metric_no_group && metric__group_events(pm); 240 m->metric_refs = NULL; 241 m->evlist = NULL; 242 243 return m; 244 out_err: 245 metric__free(m); 246 return NULL; 247 } 248 249 static bool contains_metric_id(struct evsel **metric_events, int num_events, 250 const char *metric_id) 251 { 252 int i; 253 254 for (i = 0; i < num_events; i++) { 255 if (!strcmp(evsel__metric_id(metric_events[i]), metric_id)) 256 return true; 257 } 258 return false; 259 } 260 261 /** 262 * setup_metric_events - Find a group of events in metric_evlist that correspond 263 * to the IDs from a parsed metric expression. 264 * @pmu: The PMU for the IDs. 265 * @ids: the metric IDs to match. 266 * @metric_evlist: the list of perf events. 267 * @out_metric_events: holds the created metric events array. 268 */ 269 static int setup_metric_events(const char *pmu, struct hashmap *ids, 270 struct evlist *metric_evlist, 271 struct evsel ***out_metric_events) 272 { 273 struct evsel **metric_events; 274 const char *metric_id; 275 struct evsel *ev; 276 size_t ids_size, matched_events, i; 277 bool all_pmus = !strcmp(pmu, "all") || !perf_pmu__is_hybrid(pmu); 278 279 *out_metric_events = NULL; 280 ids_size = hashmap__size(ids); 281 282 metric_events = calloc(sizeof(void *), ids_size + 1); 283 if (!metric_events) 284 return -ENOMEM; 285 286 matched_events = 0; 287 evlist__for_each_entry(metric_evlist, ev) { 288 struct expr_id_data *val_ptr; 289 290 /* Don't match events for the wrong hybrid PMU. 
		 */
		if (!all_pmus && ev->pmu_name &&
		    perf_pmu__is_hybrid(ev->pmu_name) &&
		    strcmp(ev->pmu_name, pmu))
			continue;
		/*
		 * Check for duplicate events with the same name. For
		 * example, uncore_imc/cas_count_read/ will turn into 6
		 * events per socket on skylakex. Only the first such
		 * event is placed in metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		if (contains_metric_id(metric_events, matched_events, metric_id))
			continue;
		/*
		 * Does this event belong to the parse context? For
		 * combined or shared groups, this metric may not care
		 * about this event.
		 */
		if (hashmap__find(ids, metric_id, &val_ptr)) {
			pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
			metric_events[matched_events++] = ev;

			/* Stop early once every ID has been matched. */
			if (matched_events >= ids_size)
				break;
		}
	}
	if (matched_events < ids_size) {
		/* Not all IDs could be matched to events. */
		free(metric_events);
		return -EINVAL;
	}
	for (i = 0; i < ids_size; i++) {
		ev = metric_events[i];
		ev->collect_stat = true;

		/*
		 * The metric leader points to the identically named
		 * event in metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same
		 * group (or globally) as being in use as uncore events
		 * may be duplicated for each pmu. Set the metric leader
		 * of such events to be the event that appears in
		 * metric_events.
336 */ 337 metric_id = evsel__metric_id(ev); 338 evlist__for_each_entry_continue(metric_evlist, ev) { 339 if (!strcmp(evsel__metric_id(ev), metric_id)) 340 ev->metric_leader = metric_events[i]; 341 } 342 } 343 *out_metric_events = metric_events; 344 return 0; 345 } 346 347 static bool match_metric(const char *n, const char *list) 348 { 349 int len; 350 char *m; 351 352 if (!list) 353 return false; 354 if (!strcmp(list, "all")) 355 return true; 356 if (!n) 357 return !strcasecmp(list, "No_group"); 358 len = strlen(list); 359 m = strcasestr(n, list); 360 if (!m) 361 return false; 362 if ((m == n || m[-1] == ';' || m[-1] == ' ') && 363 (m[len] == 0 || m[len] == ';')) 364 return true; 365 return false; 366 } 367 368 static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric) 369 { 370 const char *pm_pmu = pm->pmu ?: "cpu"; 371 372 if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu)) 373 return false; 374 375 return match_metric(pm->metric_group, metric) || 376 match_metric(pm->metric_name, metric); 377 } 378 379 /** struct mep - RB-tree node for building printing information. */ 380 struct mep { 381 /** nd - RB-tree element. */ 382 struct rb_node nd; 383 /** @metric_group: Owned metric group name, separated others with ';'. 
*/ 384 char *metric_group; 385 const char *metric_name; 386 const char *metric_desc; 387 const char *metric_long_desc; 388 const char *metric_expr; 389 const char *metric_threshold; 390 const char *metric_unit; 391 }; 392 393 static int mep_cmp(struct rb_node *rb_node, const void *entry) 394 { 395 struct mep *a = container_of(rb_node, struct mep, nd); 396 struct mep *b = (struct mep *)entry; 397 int ret; 398 399 ret = strcmp(a->metric_group, b->metric_group); 400 if (ret) 401 return ret; 402 403 return strcmp(a->metric_name, b->metric_name); 404 } 405 406 static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry) 407 { 408 struct mep *me = malloc(sizeof(struct mep)); 409 410 if (!me) 411 return NULL; 412 413 memcpy(me, entry, sizeof(struct mep)); 414 return &me->nd; 415 } 416 417 static void mep_delete(struct rblist *rl __maybe_unused, 418 struct rb_node *nd) 419 { 420 struct mep *me = container_of(nd, struct mep, nd); 421 422 zfree(&me->metric_group); 423 free(me); 424 } 425 426 static struct mep *mep_lookup(struct rblist *groups, const char *metric_group, 427 const char *metric_name) 428 { 429 struct rb_node *nd; 430 struct mep me = { 431 .metric_group = strdup(metric_group), 432 .metric_name = metric_name, 433 }; 434 nd = rblist__find(groups, &me); 435 if (nd) { 436 free(me.metric_group); 437 return container_of(nd, struct mep, nd); 438 } 439 rblist__add_node(groups, &me); 440 nd = rblist__find(groups, &me); 441 if (nd) 442 return container_of(nd, struct mep, nd); 443 return NULL; 444 } 445 446 static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm, 447 struct rblist *groups) 448 { 449 const char *g; 450 char *omg, *mg; 451 452 mg = strdup(pm->metric_group ?: "No_group"); 453 if (!mg) 454 return -ENOMEM; 455 omg = mg; 456 while ((g = strsep(&mg, ";")) != NULL) { 457 struct mep *me; 458 459 g = skip_spaces(g); 460 if (strlen(g)) 461 me = mep_lookup(groups, g, pm->metric_name); 462 else 463 me = mep_lookup(groups, 
"No_group", pm->metric_name); 464 465 if (me) { 466 me->metric_desc = pm->desc; 467 me->metric_long_desc = pm->long_desc; 468 me->metric_expr = pm->metric_expr; 469 me->metric_threshold = pm->metric_threshold; 470 me->metric_unit = pm->unit; 471 } 472 } 473 free(omg); 474 475 return 0; 476 } 477 478 struct metricgroup_iter_data { 479 pmu_metric_iter_fn fn; 480 void *data; 481 }; 482 483 static int metricgroup__sys_event_iter(const struct pmu_metric *pm, 484 const struct pmu_metrics_table *table, 485 void *data) 486 { 487 struct metricgroup_iter_data *d = data; 488 struct perf_pmu *pmu = NULL; 489 490 if (!pm->metric_expr || !pm->compat) 491 return 0; 492 493 while ((pmu = perf_pmu__scan(pmu))) { 494 495 if (!pmu->id || strcmp(pmu->id, pm->compat)) 496 continue; 497 498 return d->fn(pm, table, d->data); 499 } 500 return 0; 501 } 502 503 static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm, 504 const struct pmu_metrics_table *table __maybe_unused, 505 void *vdata) 506 { 507 struct rblist *groups = vdata; 508 509 return metricgroup__add_to_mep_groups(pm, groups); 510 } 511 512 void metricgroup__print(const struct print_callbacks *print_cb, void *print_state) 513 { 514 struct rblist groups; 515 const struct pmu_metrics_table *table; 516 struct rb_node *node, *next; 517 518 rblist__init(&groups); 519 groups.node_new = mep_new; 520 groups.node_cmp = mep_cmp; 521 groups.node_delete = mep_delete; 522 table = pmu_metrics_table__find(); 523 if (table) { 524 pmu_metrics_table_for_each_metric(table, 525 metricgroup__add_to_mep_groups_callback, 526 &groups); 527 } 528 { 529 struct metricgroup_iter_data data = { 530 .fn = metricgroup__add_to_mep_groups_callback, 531 .data = &groups, 532 }; 533 pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data); 534 } 535 536 for (node = rb_first_cached(&groups.entries); node; node = next) { 537 struct mep *me = container_of(node, struct mep, nd); 538 539 print_cb->print_metric(print_state, 540 me->metric_group, 
541 me->metric_name, 542 me->metric_desc, 543 me->metric_long_desc, 544 me->metric_expr, 545 me->metric_threshold, 546 me->metric_unit); 547 next = rb_next(node); 548 rblist__remove_node(&groups, node); 549 } 550 } 551 552 static const char *code_characters = ",-=@"; 553 554 static int encode_metric_id(struct strbuf *sb, const char *x) 555 { 556 char *c; 557 int ret = 0; 558 559 for (; *x; x++) { 560 c = strchr(code_characters, *x); 561 if (c) { 562 ret = strbuf_addch(sb, '!'); 563 if (ret) 564 break; 565 566 ret = strbuf_addch(sb, '0' + (c - code_characters)); 567 if (ret) 568 break; 569 } else { 570 ret = strbuf_addch(sb, *x); 571 if (ret) 572 break; 573 } 574 } 575 return ret; 576 } 577 578 static int decode_metric_id(struct strbuf *sb, const char *x) 579 { 580 const char *orig = x; 581 size_t i; 582 char c; 583 int ret; 584 585 for (; *x; x++) { 586 c = *x; 587 if (*x == '!') { 588 x++; 589 i = *x - '0'; 590 if (i > strlen(code_characters)) { 591 pr_err("Bad metric-id encoding in: '%s'", orig); 592 return -1; 593 } 594 c = code_characters[i]; 595 } 596 ret = strbuf_addch(sb, c); 597 if (ret) 598 return ret; 599 } 600 return 0; 601 } 602 603 static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier) 604 { 605 struct evsel *ev; 606 struct strbuf sb = STRBUF_INIT; 607 char *cur; 608 int ret = 0; 609 610 evlist__for_each_entry(perf_evlist, ev) { 611 if (!ev->metric_id) 612 continue; 613 614 ret = strbuf_setlen(&sb, 0); 615 if (ret) 616 break; 617 618 ret = decode_metric_id(&sb, ev->metric_id); 619 if (ret) 620 break; 621 622 free((char *)ev->metric_id); 623 ev->metric_id = strdup(sb.buf); 624 if (!ev->metric_id) { 625 ret = -ENOMEM; 626 break; 627 } 628 /* 629 * If the name is just the parsed event, use the metric-id to 630 * give a more friendly display version. 
631 */ 632 if (strstr(ev->name, "metric-id=")) { 633 bool has_slash = false; 634 635 zfree(&ev->name); 636 for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) { 637 *cur = '/'; 638 has_slash = true; 639 } 640 641 if (modifier) { 642 if (!has_slash && !strchr(sb.buf, ':')) { 643 ret = strbuf_addch(&sb, ':'); 644 if (ret) 645 break; 646 } 647 ret = strbuf_addstr(&sb, modifier); 648 if (ret) 649 break; 650 } 651 ev->name = strdup(sb.buf); 652 if (!ev->name) { 653 ret = -ENOMEM; 654 break; 655 } 656 } 657 } 658 strbuf_release(&sb); 659 return ret; 660 } 661 662 static int metricgroup__build_event_string(struct strbuf *events, 663 const struct expr_parse_ctx *ctx, 664 const char *modifier, 665 bool group_events) 666 { 667 struct hashmap_entry *cur; 668 size_t bkt; 669 bool no_group = true, has_tool_events = false; 670 bool tool_events[PERF_TOOL_MAX] = {false}; 671 int ret = 0; 672 673 #define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0) 674 675 hashmap__for_each_entry(ctx->ids, cur, bkt) { 676 const char *sep, *rsep, *id = cur->pkey; 677 enum perf_tool_event ev; 678 679 pr_debug("found event %s\n", id); 680 681 /* Always move tool events outside of the group. */ 682 ev = perf_tool_event__from_str(id); 683 if (ev != PERF_TOOL_NONE) { 684 has_tool_events = true; 685 tool_events[ev] = true; 686 continue; 687 } 688 /* Separate events with commas and open the group if necessary. */ 689 if (no_group) { 690 if (group_events) { 691 ret = strbuf_addch(events, '{'); 692 RETURN_IF_NON_ZERO(ret); 693 } 694 695 no_group = false; 696 } else { 697 ret = strbuf_addch(events, ','); 698 RETURN_IF_NON_ZERO(ret); 699 } 700 /* 701 * Encode the ID as an event string. Add a qualifier for 702 * metric_id that is the original name except with characters 703 * that parse-events can't parse replaced. 
For example, 704 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/ 705 */ 706 sep = strchr(id, '@'); 707 if (sep != NULL) { 708 ret = strbuf_add(events, id, sep - id); 709 RETURN_IF_NON_ZERO(ret); 710 ret = strbuf_addch(events, '/'); 711 RETURN_IF_NON_ZERO(ret); 712 rsep = strrchr(sep, '@'); 713 ret = strbuf_add(events, sep + 1, rsep - sep - 1); 714 RETURN_IF_NON_ZERO(ret); 715 ret = strbuf_addstr(events, ",metric-id="); 716 RETURN_IF_NON_ZERO(ret); 717 sep = rsep; 718 } else { 719 sep = strchr(id, ':'); 720 if (sep != NULL) { 721 ret = strbuf_add(events, id, sep - id); 722 RETURN_IF_NON_ZERO(ret); 723 } else { 724 ret = strbuf_addstr(events, id); 725 RETURN_IF_NON_ZERO(ret); 726 } 727 ret = strbuf_addstr(events, "/metric-id="); 728 RETURN_IF_NON_ZERO(ret); 729 } 730 ret = encode_metric_id(events, id); 731 RETURN_IF_NON_ZERO(ret); 732 ret = strbuf_addstr(events, "/"); 733 RETURN_IF_NON_ZERO(ret); 734 735 if (sep != NULL) { 736 ret = strbuf_addstr(events, sep + 1); 737 RETURN_IF_NON_ZERO(ret); 738 } 739 if (modifier) { 740 ret = strbuf_addstr(events, modifier); 741 RETURN_IF_NON_ZERO(ret); 742 } 743 } 744 if (!no_group && group_events) { 745 ret = strbuf_addf(events, "}:W"); 746 RETURN_IF_NON_ZERO(ret); 747 } 748 if (has_tool_events) { 749 int i; 750 751 perf_tool_event__for_each_event(i) { 752 if (tool_events[i]) { 753 if (!no_group) { 754 ret = strbuf_addch(events, ','); 755 RETURN_IF_NON_ZERO(ret); 756 } 757 no_group = false; 758 ret = strbuf_addstr(events, perf_tool_event__to_str(i)); 759 RETURN_IF_NON_ZERO(ret); 760 } 761 } 762 } 763 764 return ret; 765 #undef RETURN_IF_NON_ZERO 766 } 767 768 int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused) 769 { 770 return 1; 771 } 772 773 /* 774 * A singly linked list on the stack of the names of metrics being 775 * processed. Used to identify recursion. 
 */
struct visited_metric {
	const char *name;
	const struct visited_metric *parent;
};

/* Arguments threaded through metricgroup__add_metric_sys_event_iter. */
struct metricgroup_add_iter_data {
	struct list_head *metric_list;
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	int *ret;
	bool *has_match;
	bool metric_no_group;
	bool metric_no_threshold;
	const char *user_requested_cpu_list;
	bool system_wide;
	struct metric *root_metric;
	const struct visited_metric *visited;
	const struct pmu_metrics_table *table;
};

static bool metricgroup__find_metric(const char *pmu,
				     const char *metric,
				     const struct pmu_metrics_table *table,
				     struct pmu_metric *pm);

static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table);

/**
 * resolve_metric - Locate metrics within the root metric and recursively add
 *                  references to them.
 * @metric_list: The list the metric is added to.
 * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @root_metric: Metrics may reference other metrics to form a tree. In this
 *               case the root_metric holds all the IDs and a list of referenced
 *               metrics. When adding a root this argument is NULL.
 * @visited: A singly linked list of metric names being added that is used to
 *           detect recursion.
830 * @table: The table that is searched for metrics, most commonly the table for the 831 * architecture perf is running upon. 832 */ 833 static int resolve_metric(struct list_head *metric_list, 834 const char *pmu, 835 const char *modifier, 836 bool metric_no_group, 837 bool metric_no_threshold, 838 const char *user_requested_cpu_list, 839 bool system_wide, 840 struct metric *root_metric, 841 const struct visited_metric *visited, 842 const struct pmu_metrics_table *table) 843 { 844 struct hashmap_entry *cur; 845 size_t bkt; 846 struct to_resolve { 847 /* The metric to resolve. */ 848 struct pmu_metric pm; 849 /* 850 * The key in the IDs map, this may differ from in case, 851 * etc. from pm->metric_name. 852 */ 853 const char *key; 854 } *pending = NULL; 855 int i, ret = 0, pending_cnt = 0; 856 857 /* 858 * Iterate all the parsed IDs and if there's a matching metric and it to 859 * the pending array. 860 */ 861 hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) { 862 struct pmu_metric pm; 863 864 if (metricgroup__find_metric(pmu, cur->pkey, table, &pm)) { 865 pending = realloc(pending, 866 (pending_cnt + 1) * sizeof(struct to_resolve)); 867 if (!pending) 868 return -ENOMEM; 869 870 memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm)); 871 pending[pending_cnt].key = cur->pkey; 872 pending_cnt++; 873 } 874 } 875 876 /* Remove the metric IDs from the context. */ 877 for (i = 0; i < pending_cnt; i++) 878 expr__del_id(root_metric->pctx, pending[i].key); 879 880 /* 881 * Recursively add all the metrics, IDs are added to the root metric's 882 * context. 883 */ 884 for (i = 0; i < pending_cnt; i++) { 885 ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group, 886 metric_no_threshold, user_requested_cpu_list, system_wide, 887 root_metric, visited, table); 888 if (ret) 889 break; 890 } 891 892 free(pending); 893 return ret; 894 } 895 896 /** 897 * __add_metric - Add a metric to metric_list. 898 * @metric_list: The list the metric is added to. 
 * @pm: The pmu_metric containing the metric to be added.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Should threshold expressions be ignored?
 * @runtime: A special argument for the parser only known at runtime.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @root_metric: Metrics may reference other metrics to form a tree. In this
 *               case the root_metric holds all the IDs and a list of referenced
 *               metrics. When adding a root this argument is NULL.
 * @visited: A singly linked list of metric names being added that is used to
 *           detect recursion.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int __add_metric(struct list_head *metric_list,
			const struct pmu_metric *pm,
			const char *modifier,
			bool metric_no_group,
			bool metric_no_threshold,
			int runtime,
			const char *user_requested_cpu_list,
			bool system_wide,
			struct metric *root_metric,
			const struct visited_metric *visited,
			const struct pmu_metrics_table *table)
{
	const struct visited_metric *vm;
	int ret;
	bool is_root = !root_metric;
	const char *expr;
	struct visited_metric visited_node = {
		.name = pm->metric_name,
		.parent = visited,
	};

	/* Refuse to add a metric that is already being added: a cycle. */
	for (vm = visited; vm; vm = vm->parent) {
		if (!strcmp(pm->metric_name, vm->name)) {
			pr_err("failed: recursion detected for %s\n", pm->metric_name);
			return -1;
		}
	}

	if (is_root) {
		/*
		 * This metric is the root of a tree and may reference other
		 * metrics that are added recursively.
948 */ 949 root_metric = metric__new(pm, modifier, metric_no_group, runtime, 950 user_requested_cpu_list, system_wide); 951 if (!root_metric) 952 return -ENOMEM; 953 954 } else { 955 int cnt = 0; 956 957 /* 958 * This metric was referenced in a metric higher in the 959 * tree. Check if the same metric is already resolved in the 960 * metric_refs list. 961 */ 962 if (root_metric->metric_refs) { 963 for (; root_metric->metric_refs[cnt].metric_name; cnt++) { 964 if (!strcmp(pm->metric_name, 965 root_metric->metric_refs[cnt].metric_name)) 966 return 0; 967 } 968 } 969 970 /* Create reference. Need space for the entry and the terminator. */ 971 root_metric->metric_refs = realloc(root_metric->metric_refs, 972 (cnt + 2) * sizeof(struct metric_ref)); 973 if (!root_metric->metric_refs) 974 return -ENOMEM; 975 976 /* 977 * Intentionally passing just const char pointers, 978 * from 'pe' object, so they never go away. We don't 979 * need to change them, so there's no need to create 980 * our own copy. 981 */ 982 root_metric->metric_refs[cnt].metric_name = pm->metric_name; 983 root_metric->metric_refs[cnt].metric_expr = pm->metric_expr; 984 985 /* Null terminate array. */ 986 root_metric->metric_refs[cnt+1].metric_name = NULL; 987 root_metric->metric_refs[cnt+1].metric_expr = NULL; 988 } 989 990 /* 991 * For both the parent and referenced metrics, we parse 992 * all the metric's IDs and add it to the root context. 993 */ 994 ret = 0; 995 expr = pm->metric_expr; 996 if (is_root && pm->metric_threshold) { 997 /* 998 * Threshold expressions are built off the actual metric. Switch 999 * to use that in case of additional necessary events. Change 1000 * the visited node name to avoid this being flagged as 1001 * recursion. If the threshold events are disabled, just use the 1002 * metric's name as a reference. This allows metric threshold 1003 * computation if there are sufficient events. 
		 */
		assert(strstr(pm->metric_threshold, pm->metric_name));
		expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
		visited_node.name = "__threshold__";
	}
	if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
		/* Broken metric. */
		ret = -EINVAL;
	}
	if (!ret) {
		/* Resolve referenced metrics. */
		const char *pmu = pm->pmu ?: "cpu";

		ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
				     metric_no_threshold, user_requested_cpu_list,
				     system_wide, root_metric, &visited_node,
				     table);
	}
	if (ret) {
		/* Only the root owns the metric; references are borrowed. */
		if (is_root)
			metric__free(root_metric);

	} else if (is_root)
		list_add(&root_metric->nd, metric_list);

	return ret;
}

/* Closure for metricgroup__find_metric_callback. */
struct metricgroup__find_metric_data {
	const char *pmu;
	const char *metric;
	struct pmu_metric *pm;
};

/* Copy the first metric matching pmu+name into data->pm, returning 1 to stop. */
static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
					     const struct pmu_metrics_table *table __maybe_unused,
					     void *vdata)
{
	struct metricgroup__find_metric_data *data = vdata;
	const char *pm_pmu = pm->pmu ?: "cpu";

	if (strcmp(data->pmu, "all") && strcmp(pm_pmu, data->pmu))
		return 0;

	if (!match_metric(pm->metric_name, data->metric))
		return 0;

	memcpy(data->pm, pm, sizeof(*pm));
	return 1;
}

/* Search @table for @metric on @pmu; fill @pm and return true when found. */
static bool metricgroup__find_metric(const char *pmu,
				     const char *metric,
				     const struct pmu_metrics_table *table,
				     struct pmu_metric *pm)
{
	struct metricgroup__find_metric_data data = {
		.pmu = pmu,
		.metric = metric,
		.pm = pm,
	};

	return pmu_metrics_table_for_each_metric(table, metricgroup__find_metric_callback, &data)
		?
			true : false;
}

/*
 * Add @pm to @metric_list once, or - when its expression contains the runtime
 * parameter '?' - once per value returned by arch_get_runtimeparam().
 */
static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table)
{
	int ret = 0;

	pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);

	if (!strstr(pm->metric_expr, "?")) {
		ret = __add_metric(metric_list, pm, modifier, metric_no_group,
				   metric_no_threshold, 0, user_requested_cpu_list,
				   system_wide, root_metric, visited, table);
	} else {
		int j, count;

		count = arch_get_runtimeparam(pm);

		/*
		 * Create one metric per runtime parameter value and add
		 * each of them to metric_list.
		 */
		for (j = 0; j < count && !ret; j++)
			ret = __add_metric(metric_list, pm, modifier, metric_no_group,
					   metric_no_threshold, j, user_requested_cpu_list,
					   system_wide, root_metric, visited, table);
	}

	return ret;
}

/* Sys-metric iterator: add @pm when it matches the requested pmu and name. */
static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
						  const struct pmu_metrics_table *table __maybe_unused,
						  void *data)
{
	struct metricgroup_add_iter_data *d = data;
	int ret;

	if (!match_pm_metric(pm, d->pmu, d->metric_name))
		return 0;

	ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
			 d->metric_no_threshold, d->user_requested_cpu_list,
			 d->system_wide, d->root_metric, d->visited, d->table);
	if (ret)
		goto out;

	*(d->has_match) = true;

out:
	*(d->ret) = ret;
	return ret;
}

/**
 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
 *                   the front. Tool events are excluded from the count.
 */
static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
			   const struct list_head *r)
{
	const struct metric *left = container_of(l, struct metric, nd);
	const struct metric *right = container_of(r, struct metric, nd);
	struct expr_id_data *data;
	int i, left_count, right_count;

	/* Count IDs, subtracting any that are tool events. */
	left_count = hashmap__size(left->pctx->ids);
	perf_tool_event__for_each_event(i) {
		if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data))
			left_count--;
	}

	right_count = hashmap__size(right->pctx->ids);
	perf_tool_event__for_each_event(i) {
		if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data))
			right_count--;
	}

	/* Descending: larger counts sort to the front of the list. */
	return right_count - left_count;
}

/* Closure for metricgroup__add_metric_callback. */
struct metricgroup__add_metric_data {
	struct list_head *list;
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	const char *user_requested_cpu_list;
	bool metric_no_group;
	bool metric_no_threshold;
	bool system_wide;
	bool has_match;
};

/* Table iterator: add @pm when it matches the requested pmu and name/group. */
static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
					    const struct pmu_metrics_table *table,
					    void *vdata)
{
	struct metricgroup__add_metric_data *data = vdata;
	int ret = 0;

	if (pm->metric_expr && match_pm_metric(pm, data->pmu, data->metric_name)) {
		bool metric_no_group = data->metric_no_group ||
			match_metric(data->metric_name, pm->metricgroup_no_group);

		data->has_match = true;
		ret = add_metric(data->list, pm, data->modifier, metric_no_group,
				 data->metric_no_threshold, data->user_requested_cpu_list,
				 data->system_wide, /*root_metric=*/NULL,
				 /*visited_metrics=*/NULL, table);
	}
	return ret;
}

/**
 * metricgroup__add_metric - Find and add a metric, or a metric group.
 * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
 * @metric_name: The name of the metric or metric group. For example, "IPC"
 *               could be the name of a metric and "TopDownL1" the name of a
 *               metric group.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Should metric thresholds be ignored.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that the metric or metric group are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
				   bool metric_no_group, bool metric_no_threshold,
				   const char *user_requested_cpu_list,
				   bool system_wide,
				   struct list_head *metric_list,
				   const struct pmu_metrics_table *table)
{
	LIST_HEAD(list);
	int ret;
	bool has_match = false;

	{
		struct metricgroup__add_metric_data data = {
			.list = &list,
			.pmu = pmu,
			.metric_name = metric_name,
			.modifier = modifier,
			.metric_no_group = metric_no_group,
			.metric_no_threshold = metric_no_threshold,
			.user_requested_cpu_list = user_requested_cpu_list,
			.system_wide = system_wide,
			.has_match = false,
		};
		/*
		 * Iterate over all metrics seeing if metric matches either the
		 * name or group. When it does add the metric to the list.
		 */
		ret = pmu_metrics_table_for_each_metric(table, metricgroup__add_metric_callback,
							&data);
		if (ret)
			goto out;

		has_match = data.has_match;
	}
	{
		/*
		 * Also search the system (uncore/sys) PMU metrics for a match;
		 * ret and has_match are updated through pointers in the
		 * iteration data.
		 *
		 * NOTE(review): metric_no_threshold is not propagated into
		 * this iteration data, so sys metrics are added with the
		 * default (thresholds enabled) — confirm this is intentional.
		 */
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_metric_sys_event_iter,
			.data = (void *) &(struct metricgroup_add_iter_data) {
				.metric_list = &list,
				.pmu = pmu,
				.metric_name = metric_name,
				.modifier = modifier,
				.metric_no_group = metric_no_group,
				.user_requested_cpu_list = user_requested_cpu_list,
				.system_wide = system_wide,
				.has_match = &has_match,
				.ret = &ret,
				.table = table,
			},
		};

		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
	}
	/* End of pmu events. */
	if (!has_match)
		ret = -EINVAL;

out:
	/*
	 * Splice onto the caller's metric_list even on failure, so the
	 * partially built metrics can still be released by the caller.
	 */
	list_splice(&list, metric_list);
	return ret;
}

/**
 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
 *                                specified in a list.
 * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
 *        would match the IPC and CPI metrics, and TopDownL1 would match all
 *        the metrics in the TopDownL1 group.
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Should metric thresholds be ignored.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that metrics are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
1287 */ 1288 static int metricgroup__add_metric_list(const char *pmu, const char *list, 1289 bool metric_no_group, 1290 bool metric_no_threshold, 1291 const char *user_requested_cpu_list, 1292 bool system_wide, struct list_head *metric_list, 1293 const struct pmu_metrics_table *table) 1294 { 1295 char *list_itr, *list_copy, *metric_name, *modifier; 1296 int ret, count = 0; 1297 1298 list_copy = strdup(list); 1299 if (!list_copy) 1300 return -ENOMEM; 1301 list_itr = list_copy; 1302 1303 while ((metric_name = strsep(&list_itr, ",")) != NULL) { 1304 modifier = strchr(metric_name, ':'); 1305 if (modifier) 1306 *modifier++ = '\0'; 1307 1308 ret = metricgroup__add_metric(pmu, metric_name, modifier, 1309 metric_no_group, metric_no_threshold, 1310 user_requested_cpu_list, 1311 system_wide, metric_list, table); 1312 if (ret == -EINVAL) 1313 pr_err("Cannot find metric or group `%s'\n", metric_name); 1314 1315 if (ret) 1316 break; 1317 1318 count++; 1319 } 1320 free(list_copy); 1321 1322 if (!ret) { 1323 /* 1324 * Warn about nmi_watchdog if any parsed metrics had the 1325 * NO_NMI_WATCHDOG constraint. 1326 */ 1327 metric__watchdog_constraint_hint(NULL, /*foot=*/true); 1328 /* No metrics. */ 1329 if (count == 0) 1330 return -EINVAL; 1331 } 1332 return ret; 1333 } 1334 1335 static void metricgroup__free_metrics(struct list_head *metric_list) 1336 { 1337 struct metric *m, *tmp; 1338 1339 list_for_each_entry_safe (m, tmp, metric_list, nd) { 1340 list_del_init(&m->nd); 1341 metric__free(m); 1342 } 1343 } 1344 1345 /** 1346 * find_tool_events - Search for the pressence of tool events in metric_list. 1347 * @metric_list: List to take metrics from. 1348 * @tool_events: Array of false values, indices corresponding to tool events set 1349 * to true if tool event is found. 
1350 */ 1351 static void find_tool_events(const struct list_head *metric_list, 1352 bool tool_events[PERF_TOOL_MAX]) 1353 { 1354 struct metric *m; 1355 1356 list_for_each_entry(m, metric_list, nd) { 1357 int i; 1358 1359 perf_tool_event__for_each_event(i) { 1360 struct expr_id_data *data; 1361 1362 if (!tool_events[i] && 1363 !expr__get_id(m->pctx, perf_tool_event__to_str(i), &data)) 1364 tool_events[i] = true; 1365 } 1366 } 1367 } 1368 1369 /** 1370 * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events 1371 * metric IDs, as the IDs are held in a set, 1372 * duplicates will be removed. 1373 * @metric_list: List to take metrics from. 1374 * @combined: Out argument for result. 1375 */ 1376 static int build_combined_expr_ctx(const struct list_head *metric_list, 1377 struct expr_parse_ctx **combined) 1378 { 1379 struct hashmap_entry *cur; 1380 size_t bkt; 1381 struct metric *m; 1382 char *dup; 1383 int ret; 1384 1385 *combined = expr__ctx_new(); 1386 if (!*combined) 1387 return -ENOMEM; 1388 1389 list_for_each_entry(m, metric_list, nd) { 1390 if (!m->group_events && !m->modifier) { 1391 hashmap__for_each_entry(m->pctx->ids, cur, bkt) { 1392 dup = strdup(cur->pkey); 1393 if (!dup) { 1394 ret = -ENOMEM; 1395 goto err_out; 1396 } 1397 ret = expr__add_id(*combined, dup); 1398 if (ret) 1399 goto err_out; 1400 } 1401 } 1402 } 1403 return 0; 1404 err_out: 1405 expr__ctx_free(*combined); 1406 *combined = NULL; 1407 return ret; 1408 } 1409 1410 /** 1411 * parse_ids - Build the event string for the ids and parse them creating an 1412 * evlist. The encoded metric_ids are decoded. 1413 * @metric_no_merge: is metric sharing explicitly disabled. 1414 * @fake_pmu: used when testing metrics not supported by the current CPU. 1415 * @ids: the event identifiers parsed from a metric. 1416 * @modifier: any modifiers added to the events. 1417 * @group_events: should events be placed in a weak group. 
1418 * @tool_events: entries set true if the tool event of index could be present in 1419 * the overall list of metrics. 1420 * @out_evlist: the created list of events. 1421 */ 1422 static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu, 1423 struct expr_parse_ctx *ids, const char *modifier, 1424 bool group_events, const bool tool_events[PERF_TOOL_MAX], 1425 struct evlist **out_evlist) 1426 { 1427 struct parse_events_error parse_error; 1428 struct evlist *parsed_evlist; 1429 struct strbuf events = STRBUF_INIT; 1430 int ret; 1431 1432 *out_evlist = NULL; 1433 if (!metric_no_merge || hashmap__size(ids->ids) == 0) { 1434 bool added_event = false; 1435 int i; 1436 /* 1437 * We may fail to share events between metrics because a tool 1438 * event isn't present in one metric. For example, a ratio of 1439 * cache misses doesn't need duration_time but the same events 1440 * may be used for a misses per second. Events without sharing 1441 * implies multiplexing, that is best avoided, so place 1442 * all tool events in every group. 1443 * 1444 * Also, there may be no ids/events in the expression parsing 1445 * context because of constant evaluation, e.g.: 1446 * event1 if #smt_on else 0 1447 * Add a tool event to avoid a parse error on an empty string. 
1448 */ 1449 perf_tool_event__for_each_event(i) { 1450 if (tool_events[i]) { 1451 char *tmp = strdup(perf_tool_event__to_str(i)); 1452 1453 if (!tmp) 1454 return -ENOMEM; 1455 ids__insert(ids->ids, tmp); 1456 added_event = true; 1457 } 1458 } 1459 if (!added_event && hashmap__size(ids->ids) == 0) { 1460 char *tmp = strdup("duration_time"); 1461 1462 if (!tmp) 1463 return -ENOMEM; 1464 ids__insert(ids->ids, tmp); 1465 } 1466 } 1467 ret = metricgroup__build_event_string(&events, ids, modifier, 1468 group_events); 1469 if (ret) 1470 return ret; 1471 1472 parsed_evlist = evlist__new(); 1473 if (!parsed_evlist) { 1474 ret = -ENOMEM; 1475 goto err_out; 1476 } 1477 pr_debug("Parsing metric events '%s'\n", events.buf); 1478 parse_events_error__init(&parse_error); 1479 ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL, 1480 &parse_error, fake_pmu, /*warn_if_reordered=*/false); 1481 if (ret) { 1482 parse_events_error__print(&parse_error, events.buf); 1483 goto err_out; 1484 } 1485 ret = decode_all_metric_ids(parsed_evlist, modifier); 1486 if (ret) 1487 goto err_out; 1488 1489 *out_evlist = parsed_evlist; 1490 parsed_evlist = NULL; 1491 err_out: 1492 parse_events_error__exit(&parse_error); 1493 evlist__delete(parsed_evlist); 1494 strbuf_release(&events); 1495 return ret; 1496 } 1497 1498 static int parse_groups(struct evlist *perf_evlist, 1499 const char *pmu, const char *str, 1500 bool metric_no_group, 1501 bool metric_no_merge, 1502 bool metric_no_threshold, 1503 const char *user_requested_cpu_list, 1504 bool system_wide, 1505 struct perf_pmu *fake_pmu, 1506 struct rblist *metric_events_list, 1507 const struct pmu_metrics_table *table) 1508 { 1509 struct evlist *combined_evlist = NULL; 1510 LIST_HEAD(metric_list); 1511 struct metric *m; 1512 bool tool_events[PERF_TOOL_MAX] = {false}; 1513 int ret; 1514 1515 if (metric_events_list->nr_entries == 0) 1516 metricgroup__rblist_init(metric_events_list); 1517 ret = metricgroup__add_metric_list(pmu, str, 
					   metric_no_group, metric_no_threshold,
					   user_requested_cpu_list,
					   system_wide, &metric_list, table);
	if (ret)
		goto out;

	/* Sort metrics from largest to smallest. */
	list_sort(NULL, &metric_list, metric_list_cmp);

	if (!metric_no_merge) {
		struct expr_parse_ctx *combined = NULL;

		find_tool_events(&metric_list, tool_events);

		/*
		 * Build one shared evlist holding the union of all mergeable
		 * metrics' event IDs, so those metrics can share events.
		 */
		ret = build_combined_expr_ctx(&metric_list, &combined);

		if (!ret && combined && hashmap__size(combined->ids)) {
			ret = parse_ids(metric_no_merge, fake_pmu, combined,
					/*modifier=*/NULL,
					/*group_events=*/false,
					tool_events,
					&combined_evlist);
		}
		if (combined)
			expr__ctx_free(combined);

		if (ret)
			goto out;
	}

	list_for_each_entry(m, &metric_list, nd) {
		struct metric_event *me;
		struct evsel **metric_events;
		struct evlist *metric_evlist = NULL;
		struct metric *n;
		struct metric_expr *expr;

		if (combined_evlist && !m->group_events) {
			/* Mergeable metric: resolve against the shared evlist. */
			metric_evlist = combined_evlist;
		} else if (!metric_no_merge) {
			/*
			 * See if the IDs for this metric are a subset of an
			 * earlier metric.
			 */
			list_for_each_entry(n, &metric_list, nd) {
				if (m == n)
					break;

				if (n->evlist == NULL)
					continue;

				/* Modifiers must match exactly to share events. */
				if ((!m->modifier && n->modifier) ||
				    (m->modifier && !n->modifier) ||
				    (m->modifier && n->modifier &&
				     strcmp(m->modifier, n->modifier)))
					continue;

				/* PMUs must match exactly to share events. */
				if ((!m->pmu && n->pmu) ||
				    (m->pmu && !n->pmu) ||
				    (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
					continue;

				if (expr__subset_of_ids(n->pctx, m->pctx)) {
					pr_debug("Events in '%s' fully contained within '%s'\n",
						 m->metric_name, n->metric_name);
					metric_evlist = n->evlist;
					break;
				}

			}
		}
		if (!metric_evlist) {
			/* No sharing possible: parse this metric's own evlist. */
			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
					m->group_events, tool_events, &m->evlist);
			if (ret)
				goto out;

			metric_evlist = m->evlist;
		}
		ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
					  metric_evlist, &metric_events);
		if (ret) {
			pr_err("Cannot resolve IDs for %s: %s\n",
			       m->metric_name, m->metric_expr);
			goto out;
		}

		/*
		 * NOTE(review): metricgroup__lookup() can return NULL on
		 * allocation failure, which would make the list_add below
		 * dereference NULL — confirm whether a check is needed.
		 */
		me = metricgroup__lookup(metric_events_list, metric_events[0], true);

		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}

		/* Transfer ownership of the metric refs from m to expr. */
		expr->metric_refs = m->metric_refs;
		m->metric_refs = NULL;
		expr->metric_expr = m->metric_expr;
		if (m->modifier) {
			char *tmp;

			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
				expr->metric_name = NULL;
			else
				expr->metric_name = tmp;
		} else
			expr->metric_name = strdup(m->metric_name);

		if (!expr->metric_name) {
			/*
			 * NOTE(review): expr (and the metric_refs just taken
			 * from m) appear to leak on this path — confirm.
			 */
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}
		expr->metric_threshold = m->metric_threshold;
		expr->metric_unit = m->metric_unit;
		expr->metric_events = metric_events;
		expr->runtime = m->pctx->sctx.runtime;
		list_add(&expr->nd,
			 &me->head);
	}


	if (combined_evlist) {
		/* Move the shared events onto the caller's evlist. */
		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
		evlist__delete(combined_evlist);
	}

	/* Move each metric's private events onto the caller's evlist. */
	list_for_each_entry(m, &metric_list, nd) {
		if (m->evlist)
			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
	}

out:
	metricgroup__free_metrics(&metric_list);
	return ret;
}

/*
 * Public entry point: parse metrics/groups in 'str' against the metrics table
 * for the running architecture. Fails with -EINVAL when no table is found.
 */
int metricgroup__parse_groups(struct evlist *perf_evlist,
			      const char *pmu,
			      const char *str,
			      bool metric_no_group,
			      bool metric_no_merge,
			      bool metric_no_threshold,
			      const char *user_requested_cpu_list,
			      bool system_wide,
			      struct rblist *metric_events)
{
	const struct pmu_metrics_table *table = pmu_metrics_table__find();

	if (!table)
		return -EINVAL;

	return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
			    metric_no_threshold, user_requested_cpu_list, system_wide,
			    /*fake_pmu=*/NULL, metric_events, table);
}

/*
 * Test entry point: like metricgroup__parse_groups() but with a caller-supplied
 * table and the fake PMU, so metrics for other CPUs can be exercised.
 */
int metricgroup__parse_groups_test(struct evlist *evlist,
				   const struct pmu_metrics_table *table,
				   const char *str,
				   struct rblist *metric_events)
{
	return parse_groups(evlist, "all", str,
			    /*metric_no_group=*/false,
			    /*metric_no_merge=*/false,
			    /*metric_no_threshold=*/false,
			    /*user_requested_cpu_list=*/NULL,
			    /*system_wide=*/false,
			    &perf_pmu__fake, metric_events, table);
}

/* Search criteria for metricgroup__has_metric_callback(). */
struct metricgroup__has_metric_data {
	const char *pmu;
	const char *metric;
};
static int metricgroup__has_metric_callback(const struct pmu_metric *pm,
					    const struct pmu_metrics_table *table __maybe_unused,
					    void *vdata)
{
	struct metricgroup__has_metric_data *data = vdata;

	/* Non-zero stops the table iteration, signalling a match was found. */
	return match_pm_metric(pm, data->pmu, data->metric) ?
1 : 0; 1699 } 1700 1701 bool metricgroup__has_metric(const char *pmu, const char *metric) 1702 { 1703 const struct pmu_metrics_table *table = pmu_metrics_table__find(); 1704 struct metricgroup__has_metric_data data = { 1705 .pmu = pmu, 1706 .metric = metric, 1707 }; 1708 1709 if (!table) 1710 return false; 1711 1712 return pmu_metrics_table_for_each_metric(table, metricgroup__has_metric_callback, &data) 1713 ? true : false; 1714 } 1715 1716 static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm, 1717 const struct pmu_metrics_table *table __maybe_unused, 1718 void *data) 1719 { 1720 unsigned int *max_level = data; 1721 unsigned int level; 1722 const char *p = strstr(pm->metric_group, "TopdownL"); 1723 1724 if (!p || p[8] == '\0') 1725 return 0; 1726 1727 level = p[8] - '0'; 1728 if (level > *max_level) 1729 *max_level = level; 1730 1731 return 0; 1732 } 1733 1734 unsigned int metricgroups__topdown_max_level(void) 1735 { 1736 unsigned int max_level = 0; 1737 const struct pmu_metrics_table *table = pmu_metrics_table__find(); 1738 1739 if (!table) 1740 return false; 1741 1742 pmu_metrics_table_for_each_metric(table, metricgroup__topdown_max_level_callback, 1743 &max_level); 1744 return max_level; 1745 } 1746 1747 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp, 1748 struct rblist *new_metric_events, 1749 struct rblist *old_metric_events) 1750 { 1751 unsigned int i; 1752 1753 for (i = 0; i < rblist__nr_entries(old_metric_events); i++) { 1754 struct rb_node *nd; 1755 struct metric_event *old_me, *new_me; 1756 struct metric_expr *old_expr, *new_expr; 1757 struct evsel *evsel; 1758 size_t alloc_size; 1759 int idx, nr; 1760 1761 nd = rblist__entry(old_metric_events, i); 1762 old_me = container_of(nd, struct metric_event, nd); 1763 1764 evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx); 1765 if (!evsel) 1766 return -EINVAL; 1767 new_me = metricgroup__lookup(new_metric_events, evsel, true); 1768 if (!new_me) 
1769 return -ENOMEM; 1770 1771 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n", 1772 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx); 1773 1774 list_for_each_entry(old_expr, &old_me->head, nd) { 1775 new_expr = malloc(sizeof(*new_expr)); 1776 if (!new_expr) 1777 return -ENOMEM; 1778 1779 new_expr->metric_expr = old_expr->metric_expr; 1780 new_expr->metric_threshold = old_expr->metric_threshold; 1781 new_expr->metric_name = strdup(old_expr->metric_name); 1782 if (!new_expr->metric_name) 1783 return -ENOMEM; 1784 1785 new_expr->metric_unit = old_expr->metric_unit; 1786 new_expr->runtime = old_expr->runtime; 1787 1788 if (old_expr->metric_refs) { 1789 /* calculate number of metric_events */ 1790 for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++) 1791 continue; 1792 alloc_size = sizeof(*new_expr->metric_refs); 1793 new_expr->metric_refs = calloc(nr + 1, alloc_size); 1794 if (!new_expr->metric_refs) { 1795 free(new_expr); 1796 return -ENOMEM; 1797 } 1798 1799 memcpy(new_expr->metric_refs, old_expr->metric_refs, 1800 nr * alloc_size); 1801 } else { 1802 new_expr->metric_refs = NULL; 1803 } 1804 1805 /* calculate number of metric_events */ 1806 for (nr = 0; old_expr->metric_events[nr]; nr++) 1807 continue; 1808 alloc_size = sizeof(*new_expr->metric_events); 1809 new_expr->metric_events = calloc(nr + 1, alloc_size); 1810 if (!new_expr->metric_events) { 1811 zfree(&new_expr->metric_refs); 1812 free(new_expr); 1813 return -ENOMEM; 1814 } 1815 1816 /* copy evsel in the same position */ 1817 for (idx = 0; idx < nr; idx++) { 1818 evsel = old_expr->metric_events[idx]; 1819 evsel = evlist__find_evsel(evlist, evsel->core.idx); 1820 if (evsel == NULL) { 1821 zfree(&new_expr->metric_events); 1822 zfree(&new_expr->metric_refs); 1823 free(new_expr); 1824 return -EINVAL; 1825 } 1826 new_expr->metric_events[idx] = evsel; 1827 } 1828 1829 list_add(&new_expr->nd, &new_me->head); 1830 } 1831 } 1832 return 0; 1833 } 1834