1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2017, Intel Corporation.
4 */
5
6 /* Manage metrics and groups of metrics from JSON files */
7
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "pmus.h"
15 #include "print-events.h"
16 #include "smt.h"
17 #include "tool_pmu.h"
18 #include "expr.h"
19 #include "rblist.h"
20 #include <string.h>
21 #include <errno.h>
22 #include "strlist.h"
23 #include <assert.h>
24 #include <linux/ctype.h>
25 #include <linux/list_sort.h>
26 #include <linux/string.h>
27 #include <linux/zalloc.h>
28 #include <perf/cpumap.h>
29 #include <subcmd/parse-options.h>
30 #include <api/fs/fs.h>
31 #include "util.h"
32 #include <asm/bug.h>
33 #include "cgroup.h"
34 #include "util/hashmap.h"
35
/*
 * Find the metric_event node for an evsel, keyed by the evsel's metric
 * leader when it has one. Optionally insert a new node when absent.
 * Returns NULL when metric_events is NULL or no node is found/created.
 */
struct metric_event *metricgroup__lookup(struct rblist *metric_events,
					 struct evsel *evsel,
					 bool create)
{
	struct rb_node *node;
	struct metric_event key = {
		.evsel = (evsel && evsel->metric_leader) ? evsel->metric_leader : evsel,
	};

	if (!metric_events)
		return NULL;

	node = rblist__find(metric_events, &key);
	if (!node && create) {
		/* Insert then re-find, as rblist copies the key into a new node. */
		rblist__add_node(metric_events, &key);
		node = rblist__find(metric_events, &key);
	}
	return node ? container_of(node, struct metric_event, nd) : NULL;
}
61
metric_event_cmp(struct rb_node * rb_node,const void * entry)62 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
63 {
64 struct metric_event *a = container_of(rb_node,
65 struct metric_event,
66 nd);
67 const struct metric_event *b = entry;
68
69 if (a->evsel == b->evsel)
70 return 0;
71 if ((char *)a->evsel < (char *)b->evsel)
72 return -1;
73 return +1;
74 }
75
/*
 * rblist node constructor: copy the lookup key into a freshly allocated
 * metric_event and initialise its expression list. Returns NULL on OOM.
 */
static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(struct metric_event));

	if (!me)
		return NULL;
	/*
	 * The memcpy copies every field, including evsel, so no separate
	 * evsel assignment is needed afterwards.
	 */
	memcpy(me, entry, sizeof(struct metric_event));
	me->is_default = false;
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}
89
/* rblist node destructor: free every metric_expr on the node, then the node. */
static void metric_event_delete(struct rblist *rblist __maybe_unused,
				struct rb_node *rb_node)
{
	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
	struct metric_expr *expr, *next;

	list_for_each_entry_safe(expr, next, &me->head, nd) {
		zfree(&expr->metric_name);
		zfree(&expr->metric_refs);
		zfree(&expr->metric_events);
		free(expr);
	}

	free(me);
}
105
/* Initialise an rblist to hold metric_event nodes with the callbacks above. */
void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
	metric_events->node_delete = metric_event_delete;
}
113
/* Release all nodes of a metric_event rblist via metric_event_delete(). */
void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}
118
/**
 * The metric under construction. The data held here will be placed in a
 * metric_expr.
 */
struct metric {
	/* Node in the list of metrics being built. */
	struct list_head nd;
	/**
	 * The expression parse context importantly holding the IDs contained
	 * within the expression.
	 */
	struct expr_parse_ctx *pctx;
	/* The PMU the metric applies to, "cpu" when none was specified. */
	const char *pmu;
	/** The name of the metric such as "IPC". */
	const char *metric_name;
	/** Modifier on the metric such as "u" or NULL for none. */
	const char *modifier;
	/** The expression to parse, for example, "instructions/cycles". */
	const char *metric_expr;
	/** Optional threshold expression where zero value is green, otherwise red. */
	const char *metric_threshold;
	/**
	 * The "ScaleUnit" that scales and adds a unit to the metric during
	 * output.
	 */
	const char *metric_unit;
	/**
	 * Optional name of the metric group reported
	 * if the Default metric group is being processed.
	 */
	const char *default_metricgroup_name;
	/** Optional null terminated array of referenced metrics. */
	struct metric_ref *metric_refs;
	/**
	 * Should events of the metric be grouped? Set from the metric's
	 * event_grouping unless overridden on the command line.
	 */
	bool group_events;
	/** Show events even if in the Default metric group. */
	bool default_show_events;
	/**
	 * Parsed events for the metric. Optional as events may be taken from a
	 * different metric whose group contains all the IDs necessary for this
	 * one.
	 */
	struct evlist *evlist;
};
164
/*
 * Warn about NMI-watchdog-induced ungrouping. With foot==false warn that
 * @name's events aren't grouped and remember it; with foot==true print the
 * closing hint, but only if any metric previously hit the constraint.
 */
static void metric__watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (foot) {
		if (violate_nmi_constraint)
			pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
				   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
				   "    perf stat ...\n"
				   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
		return;
	}

	pr_warning("Not grouping metric %s's events.\n", name);
	violate_nmi_constraint = true;
}
183
/*
 * Decide whether the metric's events should be placed in a "{}" group,
 * honouring the metric's event_grouping constraint, the NMI watchdog state
 * and SMT. Warns (once per metric) when the NMI watchdog forces ungrouping.
 */
static bool metric__group_events(const struct pmu_metric *pm, bool metric_no_threshold)
{
	switch (pm->event_grouping) {
	case MetricNoGroupEvents:
		return false;
	case MetricNoGroupEventsNmi:
		if (sysctl__nmi_watchdog_enabled()) {
			metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
			return false;
		}
		return true;
	case MetricNoGroupEventsThresholdAndNmi:
		if (metric_no_threshold)
			return true;
		if (sysctl__nmi_watchdog_enabled()) {
			metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
			return false;
		}
		return true;
	case MetricNoGroupEventsSmt:
		return !smt_on();
	case MetricGroupEvents:
	default:
		return true;
	}
}
208
metric__free(struct metric * m)209 static void metric__free(struct metric *m)
210 {
211 if (!m)
212 return;
213
214 zfree(&m->metric_refs);
215 expr__ctx_free(m->pctx);
216 zfree(&m->modifier);
217 evlist__delete(m->evlist);
218 free(m);
219 }
220
/*
 * Allocate and populate a metric from a pmu_metric table entry. The modifier
 * and user_requested_cpu_list strings are duplicated; other strings are
 * borrowed from @pm. Returns NULL on allocation failure.
 */
static struct metric *metric__new(const struct pmu_metric *pm,
				  const char *modifier,
				  bool metric_no_group,
				  bool metric_no_threshold,
				  int runtime,
				  const char *user_requested_cpu_list,
				  bool system_wide)
{
	/* zalloc leaves all pointer members NULL for metric__free() on error. */
	struct metric *m = zalloc(sizeof(*m));

	if (!m)
		return NULL;

	m->pctx = expr__ctx_new();
	if (!m->pctx)
		goto err;

	m->pmu = pm->pmu ?: "cpu";
	m->metric_name = pm->metric_name;
	m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
	if (modifier) {
		m->modifier = strdup(modifier);
		if (!m->modifier)
			goto err;
	}
	m->metric_expr = pm->metric_expr;
	m->metric_threshold = pm->metric_threshold;
	m->metric_unit = pm->unit;
	if (user_requested_cpu_list) {
		m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
		if (!m->pctx->sctx.user_requested_cpu_list)
			goto err;
	}
	m->pctx->sctx.runtime = runtime;
	m->pctx->sctx.system_wide = system_wide;
	m->group_events = !metric_no_group && metric__group_events(pm, metric_no_threshold);
	m->default_show_events = pm->default_show_events;

	return m;
err:
	metric__free(m);
	return NULL;
}
269
/* Is an evsel with the given metric-id already in the first num_events slots? */
static bool contains_metric_id(struct evsel **metric_events, int num_events,
			       const char *metric_id)
{
	for (int i = 0; i < num_events; i++) {
		if (strcmp(evsel__metric_id(metric_events[i]), metric_id) == 0)
			return true;
	}
	return false;
}
281
/**
 * setup_metric_events - Find a group of events in metric_evlist that correspond
 *                       to the IDs from a parsed metric expression.
 * @pmu: The PMU for the IDs.
 * @ids: the metric IDs to match.
 * @metric_evlist: the list of perf events.
 * @out_metric_events: holds the created metric events array.
 *
 * Return: 0 with *out_metric_events set on success, -ENOMEM on allocation
 * failure, -EINVAL when not every ID could be matched to an event.
 */
static int setup_metric_events(const char *pmu, struct hashmap *ids,
			       struct evlist *metric_evlist,
			       struct evsel ***out_metric_events)
{
	struct evsel **metric_events;
	const char *metric_id;
	struct evsel *ev;
	size_t ids_size, matched_events, i;
	/* PMU filtering is skipped for "all", non-core PMUs or non-hybrid systems. */
	bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);

	*out_metric_events = NULL;
	ids_size = hashmap__size(ids);

	/* +1 slot so the array is NULL terminated. */
	metric_events = calloc(ids_size + 1, sizeof(void *));
	if (!metric_events)
		return -ENOMEM;

	matched_events = 0;
	evlist__for_each_entry(metric_evlist, ev) {
		struct expr_id_data *val_ptr;

		/* Don't match events for the wrong hybrid PMU. */
		if (!all_pmus && ev->pmu && evsel__is_hybrid(ev) &&
		    strcmp(ev->pmu->name, pmu))
			continue;
		/*
		 * Check for duplicate events with the same name. For
		 * example, uncore_imc/cas_count_read/ will turn into 6
		 * events per socket on skylakex. Only the first such
		 * event is placed in metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		if (contains_metric_id(metric_events, matched_events, metric_id))
			continue;
		/*
		 * Does this event belong to the parse context? For
		 * combined or shared groups, this metric may not care
		 * about this event.
		 */
		if (hashmap__find(ids, metric_id, &val_ptr)) {
			pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
			metric_events[matched_events++] = ev;

			if (matched_events >= ids_size)
				break;
		}
	}
	if (matched_events < ids_size) {
		/* Some IDs have no event: the metric can't be computed. */
		free(metric_events);
		return -EINVAL;
	}
	for (i = 0; i < ids_size; i++) {
		ev = metric_events[i];
		ev->collect_stat = true;

		/*
		 * The metric leader points to the identically named
		 * event in metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same
		 * group (or globally) as being in use as uncore events
		 * may be duplicated for each pmu. Set the metric leader
		 * of such events to be the event that appears in
		 * metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		evlist__for_each_entry_continue(metric_evlist, ev) {
			if (!strcmp(evsel__metric_id(ev), metric_id))
				ev->metric_leader = metric_events[i];
		}
	}
	*out_metric_events = metric_events;
	return 0;
}
366
/*
 * Does @sought match any of the semicolon separated names in
 * @metric_or_groups? Matching is case-insensitive on whole segments.
 * "all" matches everything; a NULL list matches only "No_group".
 */
static bool match_metric_or_groups(const char *metric_or_groups, const char *sought)
{
	size_t n;
	const char *pos;

	if (!sought)
		return false;
	if (!strcmp(sought, "all"))
		return true;
	if (!metric_or_groups)
		return !strcasecmp(sought, "No_group");

	n = strlen(sought);
	for (pos = metric_or_groups; pos; ) {
		/* Segment match: same prefix and ends at a ';' or the NUL. */
		if (!strncasecmp(pos, sought, n) &&
		    (pos[n] == '\0' || pos[n] == ';'))
			return true;
		pos = strchr(pos, ';');
		if (pos)
			pos++;
	}
	return false;
}
385
match_pm_metric_or_groups(const struct pmu_metric * pm,const char * pmu,const char * metric_or_groups)386 static bool match_pm_metric_or_groups(const struct pmu_metric *pm, const char *pmu,
387 const char *metric_or_groups)
388 {
389 const char *pm_pmu = pm->pmu ?: "cpu";
390
391 if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
392 return false;
393
394 return match_metric_or_groups(pm->metric_group, metric_or_groups) ||
395 match_metric_or_groups(pm->metric_name, metric_or_groups);
396 }
397
/*
 * Bundles a pmu_metric_iter_fn callback with its opaque data so both can be
 * forwarded through metricgroup__sys_event_iter().
 */
struct metricgroup_iter_data {
	pmu_metric_iter_fn fn;
	void *data;
};
402
metricgroup__sys_event_iter(const struct pmu_metric * pm,const struct pmu_metrics_table * table,void * data)403 static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
404 const struct pmu_metrics_table *table,
405 void *data)
406 {
407 struct metricgroup_iter_data *d = data;
408 struct perf_pmu *pmu = NULL;
409
410 if (!pm->metric_expr || !pm->compat)
411 return 0;
412
413 while ((pmu = perf_pmus__scan(pmu))) {
414
415 if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
416 continue;
417
418 return d->fn(pm, table, d->data);
419 }
420 return 0;
421 }
422
/*
 * Invoke @fn for every metric in @table, in the default metrics table, and
 * in the system (uncore "Compat") metrics. Stops at the first non-zero
 * return from @fn.
 */
int metricgroup__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
				 void *data)
{
	struct metricgroup_iter_data sys_data = {
		.fn = fn,
		.data = data,
	};
	const struct pmu_metrics_table *tables[2] = {
		table,
		pmu_metrics_table__default(),
	};

	for (size_t i = 0; i < ARRAY_SIZE(tables); i++) {
		int ret;

		if (!tables[i])
			continue;

		ret = pmu_metrics_table__for_each_metric(tables[i], fn, data);
		if (ret)
			return ret;
	}

	/* Finally walk the system-wide metrics. */
	return pmu_for_each_sys_metric(metricgroup__sys_event_iter, &sys_data);
}
448
449 static const char *code_characters = ",-=@";
450
encode_metric_id(struct strbuf * sb,const char * x)451 static int encode_metric_id(struct strbuf *sb, const char *x)
452 {
453 int ret = 0;
454
455 for (; *x; x++) {
456 const char *c = strchr(code_characters, *x);
457 if (c) {
458 ret = strbuf_addch(sb, '!');
459 if (ret)
460 break;
461
462 ret = strbuf_addch(sb, '0' + (c - code_characters));
463 if (ret)
464 break;
465 } else {
466 ret = strbuf_addch(sb, *x);
467 if (ret)
468 break;
469 }
470 }
471 return ret;
472 }
473
decode_metric_id(struct strbuf * sb,const char * x)474 static int decode_metric_id(struct strbuf *sb, const char *x)
475 {
476 const char *orig = x;
477 size_t i;
478 char c;
479 int ret;
480
481 for (; *x; x++) {
482 c = *x;
483 if (*x == '!') {
484 x++;
485 i = *x - '0';
486 if (i > strlen(code_characters)) {
487 pr_err("Bad metric-id encoding in: '%s'", orig);
488 return -1;
489 }
490 c = code_characters[i];
491 }
492 ret = strbuf_addch(sb, c);
493 if (ret)
494 return ret;
495 }
496 return 0;
497 }
498
/**
 * decode_all_metric_ids - Decode the metric-id of every event in @perf_evlist
 *                         back to its original, pre-encoded form.
 * @perf_evlist: the parsed events.
 * @modifier: if non-null, event modifiers like "u" appended when names are
 *            rewritten.
 *
 * When an event name still contains "metric-id=" (i.e. it is just the parsed
 * event string), the name is replaced with a friendlier version derived from
 * the decoded metric-id, with '@' turned back into '/' and the modifier
 * re-appended. Returns 0 or a negative error.
 */
static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
{
	struct evsel *ev;
	struct strbuf sb = STRBUF_INIT;
	char *cur;
	int ret = 0;

	evlist__for_each_entry(perf_evlist, ev) {
		if (!ev->metric_id)
			continue;

		/* Reuse the strbuf for each event. */
		ret = strbuf_setlen(&sb, 0);
		if (ret)
			break;

		ret = decode_metric_id(&sb, ev->metric_id);
		if (ret)
			break;

		free((char *)ev->metric_id);
		ev->metric_id = strdup(sb.buf);
		if (!ev->metric_id) {
			ret = -ENOMEM;
			break;
		}
		/*
		 * If the name is just the parsed event, use the metric-id to
		 * give a more friendly display version.
		 */
		if (strstr(ev->name, "metric-id=")) {
			bool has_slash = false;

			zfree(&ev->name);
			/* Restore '/' event-term delimiters encoded as '@'. */
			for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
				*cur = '/';
				has_slash = true;
			}

			if (modifier) {
				/* Plain event names need a ':' before the modifier. */
				if (!has_slash && !strchr(sb.buf, ':')) {
					ret = strbuf_addch(&sb, ':');
					if (ret)
						break;
				}
				ret = strbuf_addstr(&sb, modifier);
				if (ret)
					break;
			}
			ev->name = strdup(sb.buf);
			if (!ev->name) {
				ret = -ENOMEM;
				break;
			}
		}
	}
	strbuf_release(&sb);
	return ret;
}
557
/**
 * metricgroup__build_event_string - Turn the IDs in @ctx into a parse-events
 *                                   string written to @events.
 * @events: output buffer.
 * @ctx: parse context whose ids hashmap supplies the events.
 * @modifier: if non-null, event modifiers like "u" appended to each event.
 * @group_events: if true, wrap the (non-tool) events in a "{}:W" weak group.
 *
 * Each event carries a "metric-id=" term holding the encoded ID (see
 * encode_metric_id()) so events can later be matched back to expression IDs.
 * Tool events are always emitted last, outside of any group.
 */
static int metricgroup__build_event_string(struct strbuf *events,
					   const struct expr_parse_ctx *ctx,
					   const char *modifier,
					   bool group_events)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool no_group = true, has_tool_events = false;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	int ret = 0;

#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)

	hashmap__for_each_entry(ctx->ids, cur, bkt) {
		const char *sep, *rsep, *id = cur->pkey;
		enum tool_pmu_event ev;

		pr_debug("found event %s\n", id);

		/* Always move tool events outside of the group. */
		ev = tool_pmu__str_to_event(id);
		if (ev != TOOL_PMU__EVENT_NONE) {
			has_tool_events = true;
			tool_events[ev] = true;
			continue;
		}
		/* Separate events with commas and open the group if necessary. */
		if (no_group) {
			if (group_events) {
				ret = strbuf_addch(events, '{');
				RETURN_IF_NON_ZERO(ret);
			}

			no_group = false;
		} else {
			ret = strbuf_addch(events, ',');
			RETURN_IF_NON_ZERO(ret);
		}
		/*
		 * Encode the ID as an event string. Add a qualifier for
		 * metric_id that is the original name except with characters
		 * that parse-events can't parse replaced. For example,
		 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
		 */
		sep = strchr(id, '@');
		if (sep != NULL) {
			/* pmu@terms@ form: emit "pmu/terms,metric-id=". */
			ret = strbuf_add(events, id, sep - id);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addch(events, '/');
			RETURN_IF_NON_ZERO(ret);
			rsep = strrchr(sep, '@');
			ret = strbuf_add(events, sep + 1, rsep - sep - 1);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addstr(events, ",metric-id=");
			RETURN_IF_NON_ZERO(ret);
			sep = rsep;
		} else {
			/* Plain name, possibly with ":modifier": emit "name/metric-id=". */
			sep = strchr(id, ':');
			if (sep != NULL) {
				ret = strbuf_add(events, id, sep - id);
				RETURN_IF_NON_ZERO(ret);
			} else {
				ret = strbuf_addstr(events, id);
				RETURN_IF_NON_ZERO(ret);
			}
			ret = strbuf_addstr(events, "/metric-id=");
			RETURN_IF_NON_ZERO(ret);
		}
		ret = encode_metric_id(events, id);
		RETURN_IF_NON_ZERO(ret);
		ret = strbuf_addstr(events, "/");
		RETURN_IF_NON_ZERO(ret);

		/* Re-append any modifier that followed the '@'/':' separator. */
		if (sep != NULL) {
			ret = strbuf_addstr(events, sep + 1);
			RETURN_IF_NON_ZERO(ret);
		}
		if (modifier) {
			ret = strbuf_addstr(events, modifier);
			RETURN_IF_NON_ZERO(ret);
		}
	}
	if (!no_group && group_events) {
		ret = strbuf_addf(events, "}:W");
		RETURN_IF_NON_ZERO(ret);
	}
	if (has_tool_events) {
		int i;

		tool_pmu__for_each_event(i) {
			if (tool_events[i]) {
				if (!no_group) {
					ret = strbuf_addch(events, ',');
					RETURN_IF_NON_ZERO(ret);
				}
				no_group = false;
				ret = strbuf_addstr(events, tool_pmu__event_to_str(i));
				RETURN_IF_NON_ZERO(ret);
			}
		}
	}

	return ret;
#undef RETURN_IF_NON_ZERO
}
663
/*
 * Weak default for the number of runtime ('?') parameter values of a metric;
 * architectures may override this.
 */
int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
{
	return 1;
}
668
/*
 * A singly linked list on the stack of the names of metrics being
 * processed. Used to identify recursion.
 */
struct visited_metric {
	/* Name of the metric at this level of the resolution. */
	const char *name;
	/* The metric that referenced this one; NULL at the root. */
	const struct visited_metric *parent;
};
677
/*
 * Arguments bundled for metric-adding iterator callbacks. The fields mirror
 * the parameters of add_metric(); see that function for their meaning.
 */
struct metricgroup_add_iter_data {
	/* The list metrics are accumulated on. */
	struct list_head *metric_list;
	/* PMU name to match, or "all". */
	const char *pmu;
	/* Metric or metric group name being sought. */
	const char *metric_name;
	/* Optional event modifier such as "u". */
	const char *modifier;
	/* Where the callback stores its result code (callers not visible here). */
	int *ret;
	/* Set when a metric matched (used by callers not visible here). */
	bool *has_match;
	bool metric_no_group;
	bool metric_no_threshold;
	/* Command line specified CPUs to record on. */
	const char *user_requested_cpu_list;
	/* Are events for all processes recorded. */
	bool system_wide;
	/* Root of the metric tree, NULL when adding a root metric. */
	struct metric *root_metric;
	/* Recursion-detection list of metric names being added. */
	const struct visited_metric *visited;
	/* The table searched for metrics. */
	const struct pmu_metrics_table *table;
};
693
694 static int add_metric(struct list_head *metric_list,
695 const struct pmu_metric *pm,
696 const char *modifier,
697 bool metric_no_group,
698 bool metric_no_threshold,
699 const char *user_requested_cpu_list,
700 bool system_wide,
701 struct metric *root_metric,
702 const struct visited_metric *visited,
703 const struct pmu_metrics_table *table);
704
/* Table-lookup callback: copy the found metric into the caller's buffer. */
static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
					     const struct pmu_metrics_table *table __maybe_unused,
					     void *vdata)
{
	struct pmu_metric *copied_pm = vdata;

	*copied_pm = *pm;
	return 0;
}
714
715 /**
716 * resolve_metric - Locate metrics within the root metric and recursively add
717 * references to them.
718 * @metric_list: The list the metric is added to.
719 * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
720 * @modifier: if non-null event modifiers like "u".
721 * @metric_no_group: Should events written to events be grouped "{}" or
722 * global. Grouping is the default but due to multiplexing the
723 * user may override.
724 * @user_requested_cpu_list: Command line specified CPUs to record on.
725 * @system_wide: Are events for all processes recorded.
726 * @root_metric: Metrics may reference other metrics to form a tree. In this
727 * case the root_metric holds all the IDs and a list of referenced
728 * metrics. When adding a root this argument is NULL.
729 * @visited: A singly linked list of metric names being added that is used to
730 * detect recursion.
731 * @table: The table that is searched for metrics, most commonly the table for the
732 * architecture perf is running upon.
733 */
resolve_metric(struct list_head * metric_list,struct perf_pmu * pmu,const char * modifier,bool metric_no_group,bool metric_no_threshold,const char * user_requested_cpu_list,bool system_wide,struct metric * root_metric,const struct visited_metric * visited,const struct pmu_metrics_table * table)734 static int resolve_metric(struct list_head *metric_list,
735 struct perf_pmu *pmu,
736 const char *modifier,
737 bool metric_no_group,
738 bool metric_no_threshold,
739 const char *user_requested_cpu_list,
740 bool system_wide,
741 struct metric *root_metric,
742 const struct visited_metric *visited,
743 const struct pmu_metrics_table *table)
744 {
745 struct hashmap_entry *cur;
746 size_t bkt;
747 struct to_resolve {
748 /* The metric to resolve. */
749 struct pmu_metric pm;
750 /*
751 * The key in the IDs map, this may differ from in case,
752 * etc. from pm->metric_name.
753 */
754 const char *key;
755 } *pending = NULL;
756 int i, ret = 0, pending_cnt = 0;
757
758 /*
759 * Iterate all the parsed IDs and if there's a matching metric and it to
760 * the pending array.
761 */
762 hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
763 struct pmu_metric pm;
764
765 if (pmu_metrics_table__find_metric(table, pmu, cur->pkey,
766 metricgroup__find_metric_callback,
767 &pm) != PMU_METRICS__NOT_FOUND) {
768 pending = realloc(pending,
769 (pending_cnt + 1) * sizeof(struct to_resolve));
770 if (!pending)
771 return -ENOMEM;
772
773 memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
774 pending[pending_cnt].key = cur->pkey;
775 pending_cnt++;
776 }
777 }
778
779 /* Remove the metric IDs from the context. */
780 for (i = 0; i < pending_cnt; i++)
781 expr__del_id(root_metric->pctx, pending[i].key);
782
783 /*
784 * Recursively add all the metrics, IDs are added to the root metric's
785 * context.
786 */
787 for (i = 0; i < pending_cnt; i++) {
788 ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
789 metric_no_threshold, user_requested_cpu_list, system_wide,
790 root_metric, visited, table);
791 if (ret)
792 break;
793 }
794
795 free(pending);
796 return ret;
797 }
798
799 /**
800 * __add_metric - Add a metric to metric_list.
801 * @metric_list: The list the metric is added to.
802 * @pm: The pmu_metric containing the metric to be added.
803 * @modifier: if non-null event modifiers like "u".
804 * @metric_no_group: Should events written to events be grouped "{}" or
805 * global. Grouping is the default but due to multiplexing the
806 * user may override.
807 * @metric_no_threshold: Should threshold expressions be ignored?
808 * @runtime: A special argument for the parser only known at runtime.
809 * @user_requested_cpu_list: Command line specified CPUs to record on.
810 * @system_wide: Are events for all processes recorded.
811 * @root_metric: Metrics may reference other metrics to form a tree. In this
812 * case the root_metric holds all the IDs and a list of referenced
813 * metrics. When adding a root this argument is NULL.
814 * @visited: A singly linked list of metric names being added that is used to
815 * detect recursion.
816 * @table: The table that is searched for metrics, most commonly the table for the
817 * architecture perf is running upon.
818 */
__add_metric(struct list_head * metric_list,const struct pmu_metric * pm,const char * modifier,bool metric_no_group,bool metric_no_threshold,int runtime,const char * user_requested_cpu_list,bool system_wide,struct metric * root_metric,const struct visited_metric * visited,const struct pmu_metrics_table * table)819 static int __add_metric(struct list_head *metric_list,
820 const struct pmu_metric *pm,
821 const char *modifier,
822 bool metric_no_group,
823 bool metric_no_threshold,
824 int runtime,
825 const char *user_requested_cpu_list,
826 bool system_wide,
827 struct metric *root_metric,
828 const struct visited_metric *visited,
829 const struct pmu_metrics_table *table)
830 {
831 const struct visited_metric *vm;
832 int ret;
833 bool is_root = !root_metric;
834 const char *expr;
835 struct visited_metric visited_node = {
836 .name = pm->metric_name,
837 .parent = visited,
838 };
839
840 for (vm = visited; vm; vm = vm->parent) {
841 if (!strcmp(pm->metric_name, vm->name)) {
842 pr_err("failed: recursion detected for %s\n", pm->metric_name);
843 return -1;
844 }
845 }
846
847 if (is_root) {
848 /*
849 * This metric is the root of a tree and may reference other
850 * metrics that are added recursively.
851 */
852 root_metric = metric__new(pm, modifier, metric_no_group, metric_no_threshold,
853 runtime, user_requested_cpu_list, system_wide);
854 if (!root_metric)
855 return -ENOMEM;
856
857 } else {
858 int cnt = 0;
859
860 /*
861 * This metric was referenced in a metric higher in the
862 * tree. Check if the same metric is already resolved in the
863 * metric_refs list.
864 */
865 if (root_metric->metric_refs) {
866 for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
867 if (!strcmp(pm->metric_name,
868 root_metric->metric_refs[cnt].metric_name))
869 return 0;
870 }
871 }
872
873 /* Create reference. Need space for the entry and the terminator. */
874 root_metric->metric_refs = realloc(root_metric->metric_refs,
875 (cnt + 2) * sizeof(struct metric_ref));
876 if (!root_metric->metric_refs)
877 return -ENOMEM;
878
879 /*
880 * Intentionally passing just const char pointers,
881 * from 'pe' object, so they never go away. We don't
882 * need to change them, so there's no need to create
883 * our own copy.
884 */
885 root_metric->metric_refs[cnt].metric_name = pm->metric_name;
886 root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;
887
888 /* Null terminate array. */
889 root_metric->metric_refs[cnt+1].metric_name = NULL;
890 root_metric->metric_refs[cnt+1].metric_expr = NULL;
891 }
892
893 /*
894 * For both the parent and referenced metrics, we parse
895 * all the metric's IDs and add it to the root context.
896 */
897 ret = 0;
898 expr = pm->metric_expr;
899 if (is_root && pm->metric_threshold) {
900 /*
901 * Threshold expressions are built off the actual metric. Switch
902 * to use that in case of additional necessary events. Change
903 * the visited node name to avoid this being flagged as
904 * recursion. If the threshold events are disabled, just use the
905 * metric's name as a reference. This allows metric threshold
906 * computation if there are sufficient events.
907 */
908 assert(strstr(pm->metric_threshold, pm->metric_name));
909 expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
910 visited_node.name = "__threshold__";
911 }
912 if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
913 /* Broken metric. */
914 ret = -EINVAL;
915 }
916 if (!ret) {
917 /* Resolve referenced metrics. */
918 struct perf_pmu *pmu;
919
920 if (pm->pmu && pm->pmu[0] != '\0')
921 pmu = perf_pmus__find(pm->pmu);
922 else
923 pmu = perf_pmus__scan_core(/*pmu=*/ NULL);
924
925 ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
926 metric_no_threshold, user_requested_cpu_list,
927 system_wide, root_metric, &visited_node,
928 table);
929 }
930 if (ret) {
931 if (is_root)
932 metric__free(root_metric);
933
934 } else if (is_root)
935 list_add(&root_metric->nd, metric_list);
936
937 return ret;
938 }
939
/*
 * Add @pm to @metric_list. Metrics whose expression contains the runtime
 * parameter '?' are instantiated once per value reported by
 * arch_get_runtimeparam(); others are added a single time.
 */
static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table)
{
	int count, j, ret = 0;

	pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);

	if (!strchr(pm->metric_expr, '?')) {
		return __add_metric(metric_list, pm, modifier, metric_no_group,
				    metric_no_threshold, 0, user_requested_cpu_list,
				    system_wide, root_metric, visited, table);
	}

	/* One instantiation per runtime parameter value. */
	count = arch_get_runtimeparam(pm);
	for (j = 0; j < count && !ret; j++)
		ret = __add_metric(metric_list, pm, modifier, metric_no_group,
				   metric_no_threshold, j, user_requested_cpu_list,
				   system_wide, root_metric, visited, table);

	return ret;
}
977
978 /**
979 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
980 * the front. tool events are excluded from the count.
981 */
metric_list_cmp(void * priv __maybe_unused,const struct list_head * l,const struct list_head * r)982 static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
983 const struct list_head *r)
984 {
985 const struct metric *left = container_of(l, struct metric, nd);
986 const struct metric *right = container_of(r, struct metric, nd);
987 struct expr_id_data *data;
988 int i, left_count, right_count;
989
990 left_count = hashmap__size(left->pctx->ids);
991 tool_pmu__for_each_event(i) {
992 if (!expr__get_id(left->pctx, tool_pmu__event_to_str(i), &data))
993 left_count--;
994 }
995
996 right_count = hashmap__size(right->pctx->ids);
997 tool_pmu__for_each_event(i) {
998 if (!expr__get_id(right->pctx, tool_pmu__event_to_str(i), &data))
999 right_count--;
1000 }
1001
1002 return right_count - left_count;
1003 }
1004
/**
 * default_metricgroup_cmp - Implements complex key for the Default metricgroup
 *                           that first sorts by default_metricgroup_name, then
 *                           metric_name.
 */
static int default_metricgroup_cmp(void *priv __maybe_unused,
				   const struct list_head *l,
				   const struct list_head *r)
{
	const struct metric *left = container_of(l, struct metric, nd);
	const struct metric *right = container_of(r, struct metric, nd);
	/* Note: right vs left ordering matches metric_list_cmp above. */
	int diff = strcmp(right->default_metricgroup_name, left->default_metricgroup_name);

	if (diff)
		return diff;

	return strcmp(right->metric_name, left->metric_name);
}
1023
1024 struct metricgroup__add_metric_data {
1025 struct list_head *list;
1026 const char *pmu;
1027 const char *metric_name;
1028 const char *modifier;
1029 const char *user_requested_cpu_list;
1030 bool metric_no_group;
1031 bool metric_no_threshold;
1032 bool system_wide;
1033 bool has_match;
1034 };
1035
metricgroup__add_metric_callback(const struct pmu_metric * pm,const struct pmu_metrics_table * table,void * vdata)1036 static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
1037 const struct pmu_metrics_table *table,
1038 void *vdata)
1039 {
1040 struct metricgroup__add_metric_data *data = vdata;
1041 int ret = 0;
1042
1043 if (pm->metric_expr && match_pm_metric_or_groups(pm, data->pmu, data->metric_name)) {
1044 bool metric_no_group = data->metric_no_group ||
1045 match_metric_or_groups(pm->metricgroup_no_group, data->metric_name);
1046
1047 data->has_match = true;
1048 ret = add_metric(data->list, pm, data->modifier, metric_no_group,
1049 data->metric_no_threshold, data->user_requested_cpu_list,
1050 data->system_wide, /*root_metric=*/NULL,
1051 /*visited_metrics=*/NULL, table);
1052 }
1053 return ret;
1054 }
1055
1056 /**
1057 * metricgroup__add_metric - Find and add a metric, or a metric group.
1058 * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
1059 * @metric_name: The name of the metric or metric group. For example, "IPC"
1060 * could be the name of a metric and "TopDownL1" the name of a
1061 * metric group.
1062 * @modifier: if non-null event modifiers like "u".
1063 * @metric_no_group: Should events written to events be grouped "{}" or
1064 * global. Grouping is the default but due to multiplexing the
1065 * user may override.
1066 * @user_requested_cpu_list: Command line specified CPUs to record on.
1067 * @system_wide: Are events for all processes recorded.
1068 * @metric_list: The list that the metric or metric group are added to.
1069 * @table: The table that is searched for metrics, most commonly the table for the
1070 * architecture perf is running upon.
1071 */
static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
				   bool metric_no_group, bool metric_no_threshold,
				   const char *user_requested_cpu_list,
				   bool system_wide,
				   struct list_head *metric_list,
				   const struct pmu_metrics_table *table)
{
	LIST_HEAD(added);
	struct metricgroup__add_metric_data data = {
		.list = &added,
		.pmu = pmu,
		.metric_name = metric_name,
		.modifier = modifier,
		.metric_no_group = metric_no_group,
		.metric_no_threshold = metric_no_threshold,
		.user_requested_cpu_list = user_requested_cpu_list,
		.system_wide = system_wide,
		.has_match = false,
	};
	int ret;

	/*
	 * Walk every metric in the table; the callback appends matches (by
	 * name or by group membership) to the local "added" list.
	 */
	ret = metricgroup__for_each_metric(table, metricgroup__add_metric_callback, &data);
	if (ret == 0 && !data.has_match)
		ret = -EINVAL;

	/*
	 * Always splice onto metric_list, even on failure, so partially added
	 * metrics can still be released by the caller.
	 */
	list_splice(&added, metric_list);
	return ret;
}
1108
1109 /**
1110 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
1111 * specified in a list.
1112 * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
1113 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
1114 * would match the IPC and CPI metrics, and TopDownL1 would match all
1115 * the metrics in the TopDownL1 group.
1116 * @metric_no_group: Should events written to events be grouped "{}" or
1117 * global. Grouping is the default but due to multiplexing the
1118 * user may override.
1119 * @user_requested_cpu_list: Command line specified CPUs to record on.
1120 * @system_wide: Are events for all processes recorded.
1121 * @metric_list: The list that metrics are added to.
1122 * @table: The table that is searched for metrics, most commonly the table for the
1123 * architecture perf is running upon.
1124 */
static int metricgroup__add_metric_list(const char *pmu, const char *list,
					bool metric_no_group,
					bool metric_no_threshold,
					const char *user_requested_cpu_list,
					bool system_wide, struct list_head *metric_list,
					const struct pmu_metrics_table *table)
{
	char *list_itr, *list_copy, *metric_name, *modifier;
	/*
	 * Initialize ret: it is otherwise only assigned inside the loop, and
	 * the "if (!ret)" below must never read an indeterminate value.
	 */
	int ret = 0, count = 0;

	/* strsep() modifies its argument, so work on a private copy. */
	list_copy = strdup(list);
	if (!list_copy)
		return -ENOMEM;
	list_itr = list_copy;

	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
		/* An optional ":modifier" suffix follows the metric name. */
		modifier = strchr(metric_name, ':');
		if (modifier)
			*modifier++ = '\0';

		ret = metricgroup__add_metric(pmu, metric_name, modifier,
					      metric_no_group, metric_no_threshold,
					      user_requested_cpu_list,
					      system_wide, metric_list, table);
		if (ret == -EINVAL)
			pr_err("Cannot find metric or group `%s'\n", metric_name);

		if (ret)
			break;

		count++;
	}
	free(list_copy);

	if (!ret) {
		/*
		 * Warn about nmi_watchdog if any parsed metrics had the
		 * NO_NMI_WATCHDOG constraint.
		 */
		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
		/* No metrics. */
		if (count == 0)
			return -EINVAL;
	}
	return ret;
}
1171
metricgroup__free_metrics(struct list_head * metric_list)1172 static void metricgroup__free_metrics(struct list_head *metric_list)
1173 {
1174 struct metric *m, *tmp;
1175
1176 list_for_each_entry_safe (m, tmp, metric_list, nd) {
1177 list_del_init(&m->nd);
1178 metric__free(m);
1179 }
1180 }
1181
1182 /**
 * find_tool_events - Search for the presence of tool events in metric_list.
1184 * @metric_list: List to take metrics from.
1185 * @tool_events: Array of false values, indices corresponding to tool events set
1186 * to true if tool event is found.
1187 */
static void find_tool_events(const struct list_head *metric_list,
			     bool tool_events[TOOL_PMU__EVENT_MAX])
{
	struct metric *m;

	list_for_each_entry(m, metric_list, nd) {
		int ev;

		tool_pmu__for_each_event(ev) {
			struct expr_id_data *unused;

			/* Once found in any metric, no need to probe again. */
			if (tool_events[ev])
				continue;
			if (!expr__get_id(m->pctx, tool_pmu__event_to_str(ev), &unused))
				tool_events[ev] = true;
		}
	}
}
1205
1206 /**
1207 * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
1208 * metric IDs, as the IDs are held in a set,
1209 * duplicates will be removed.
1210 * @metric_list: List to take metrics from.
1211 * @combined: Out argument for result.
1212 */
static int build_combined_expr_ctx(const struct list_head *metric_list,
				   struct expr_parse_ctx **combined)
{
	struct hashmap_entry *cur;
	size_t bkt;
	struct metric *m;
	char *dup;
	int ret;

	*combined = expr__ctx_new();
	if (!*combined)
		return -ENOMEM;

	list_for_each_entry(m, metric_list, nd) {
		/* Only merge metrics that neither demand grouping nor carry a modifier. */
		if (!m->group_events && !m->modifier) {
			hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
				/* Duplicate the ID so the combined ctx owns its copy. */
				dup = strdup(cur->pkey);
				if (!dup) {
					ret = -ENOMEM;
					goto err_out;
				}
				/*
				 * NOTE(review): assumes expr__add_id() takes
				 * ownership of dup (including on failure) —
				 * confirm, otherwise this path leaks dup.
				 */
				ret = expr__add_id(*combined, dup);
				if (ret)
					goto err_out;
			}
		}
	}
	return 0;
err_out:
	/* Free the partially built context and clear the out argument. */
	expr__ctx_free(*combined);
	*combined = NULL;
	return ret;
}
1246
1247 /**
1248 * parse_ids - Build the event string for the ids and parse them creating an
1249 * evlist. The encoded metric_ids are decoded.
1250 * @metric_no_merge: is metric sharing explicitly disabled.
1251 * @fake_pmu: use a fake PMU when testing metrics not supported by the current CPU.
1252 * @ids: the event identifiers parsed from a metric.
1253 * @modifier: any modifiers added to the events.
1254 * @group_events: should events be placed in a weak group.
1255 * @tool_events: entries set true if the tool event of index could be present in
1256 * the overall list of metrics.
1257 * @out_evlist: the created list of events.
1258 */
static int parse_ids(bool metric_no_merge, bool fake_pmu,
		     struct expr_parse_ctx *ids, const char *modifier,
		     bool group_events, const bool tool_events[TOOL_PMU__EVENT_MAX],
		     struct evlist **out_evlist)
{
	struct parse_events_error parse_error;
	struct evlist *parsed_evlist;
	struct strbuf events = STRBUF_INIT;
	int ret;

	*out_evlist = NULL;
	if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
		bool added_event = false;
		int i;
		/*
		 * We may fail to share events between metrics because a tool
		 * event isn't present in one metric. For example, a ratio of
		 * cache misses doesn't need duration_time but the same events
		 * may be used for a misses per second. Events without sharing
		 * implies multiplexing, that is best avoided, so place
		 * all tool events in every group.
		 *
		 * Also, there may be no ids/events in the expression parsing
		 * context because of constant evaluation, e.g.:
		 *    event1 if #smt_on else 0
		 * Add a tool event to avoid a parse error on an empty string.
		 */
		tool_pmu__for_each_event(i) {
			if (tool_events[i]) {
				char *tmp = strdup(tool_pmu__event_to_str(i));

				if (!tmp)
					return -ENOMEM;
				ids__insert(ids->ids, tmp);
				added_event = true;
			}
		}
		if (!added_event && hashmap__size(ids->ids) == 0) {
			char *tmp = strdup("duration_time");

			if (!tmp)
				return -ENOMEM;
			ids__insert(ids->ids, tmp);
		}
	}
	ret = metricgroup__build_event_string(&events, ids, modifier,
					      group_events);
	if (ret)
		return ret;

	/*
	 * Initialize parse_error *before* any path can jump to err_out:
	 * previously, evlist__new() failing jumped to err_out and ran
	 * parse_events_error__exit() on an uninitialized struct (UB).
	 */
	parse_events_error__init(&parse_error);
	parsed_evlist = evlist__new();
	if (!parsed_evlist) {
		ret = -ENOMEM;
		goto err_out;
	}
	pr_debug("Parsing metric events '%s'\n", events.buf);
	ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
			     &parse_error, fake_pmu, /*warn_if_reordered=*/false,
			     /*fake_tp=*/false);
	if (ret) {
		parse_events_error__print(&parse_error, events.buf);
		goto err_out;
	}
	ret = decode_all_metric_ids(parsed_evlist, modifier);
	if (ret)
		goto err_out;

	/* Success: transfer ownership to the caller. */
	*out_evlist = parsed_evlist;
	parsed_evlist = NULL;
err_out:
	parse_events_error__exit(&parse_error);
	evlist__delete(parsed_evlist);
	strbuf_release(&events);
	return ret;
}
1335
1336 /* How many times will a given evsel be used in a set of metrics? */
count_uses(struct list_head * metric_list,struct evsel * evsel)1337 static int count_uses(struct list_head *metric_list, struct evsel *evsel)
1338 {
1339 const char *metric_id = evsel__metric_id(evsel);
1340 struct metric *m;
1341 int uses = 0;
1342
1343 list_for_each_entry(m, metric_list, nd) {
1344 if (hashmap__find(m->pctx->ids, metric_id, NULL))
1345 uses++;
1346 }
1347 return uses;
1348 }
1349
1350 /*
1351 * Select the evsel that stat-display will use to trigger shadow/metric
1352 * printing. Pick the least shared non-tool evsel, encouraging metrics to be
1353 * with a hardware counter that is specific to them.
1354 */
pick_display_evsel(struct list_head * metric_list,struct evsel ** metric_events)1355 static struct evsel *pick_display_evsel(struct list_head *metric_list,
1356 struct evsel **metric_events)
1357 {
1358 struct evsel *selected = metric_events[0];
1359 size_t selected_uses;
1360 bool selected_is_tool;
1361
1362 if (!selected)
1363 return NULL;
1364
1365 selected_uses = count_uses(metric_list, selected);
1366 selected_is_tool = evsel__is_tool(selected);
1367 for (int i = 1; metric_events[i]; i++) {
1368 struct evsel *candidate = metric_events[i];
1369 size_t candidate_uses = count_uses(metric_list, candidate);
1370
1371 if ((selected_is_tool && !evsel__is_tool(candidate)) ||
1372 (candidate_uses < selected_uses)) {
1373 selected = candidate;
1374 selected_uses = candidate_uses;
1375 selected_is_tool = evsel__is_tool(selected);
1376 }
1377 }
1378 return selected;
1379 }
1380
/*
 * parse_groups - Expand @str into metrics, build/share their evlists and
 * attach a metric_expr to perf_evlist's metric_events rblist for each metric.
 * Returns 0 on success or a negative error code.
 */
static int parse_groups(struct evlist *perf_evlist,
			const char *pmu, const char *str,
			bool metric_no_group,
			bool metric_no_merge,
			bool metric_no_threshold,
			const char *user_requested_cpu_list,
			bool system_wide,
			bool fake_pmu,
			const struct pmu_metrics_table *table)
{
	struct evlist *combined_evlist = NULL;
	LIST_HEAD(metric_list);
	struct metric *m;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	bool is_default = !strcmp(str, "Default");
	int ret;

	ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
					   user_requested_cpu_list,
					   system_wide, &metric_list, table);
	if (ret)
		goto out;

	/* Sort metrics from largest to smallest. */
	list_sort(NULL, &metric_list, metric_list_cmp);

	if (!metric_no_merge) {
		struct expr_parse_ctx *combined = NULL;

		find_tool_events(&metric_list, tool_events);

		/* Merge all shareable (!group_events, no modifier) IDs into one ctx. */
		ret = build_combined_expr_ctx(&metric_list, &combined);

		if (!ret && combined && hashmap__size(combined->ids)) {
			ret = parse_ids(metric_no_merge, fake_pmu, combined,
					/*modifier=*/NULL,
					/*group_events=*/false,
					tool_events,
					&combined_evlist);
		}
		if (combined)
			expr__ctx_free(combined);

		if (ret)
			goto out;
	}

	if (is_default)
		list_sort(NULL, &metric_list, default_metricgroup_cmp);

	list_for_each_entry(m, &metric_list, nd) {
		struct metric_event *me;
		struct evsel **metric_events;
		struct evlist *metric_evlist = NULL;
		struct metric *n;
		struct metric_expr *expr;

		if (combined_evlist && !m->group_events) {
			metric_evlist = combined_evlist;
		} else if (!metric_no_merge) {
			/*
			 * See if the IDs for this metric are a subset of an
			 * earlier metric.
			 */
			list_for_each_entry(n, &metric_list, nd) {
				if (m == n)
					break;

				if (n->evlist == NULL)
					continue;

				/* Modifiers must match exactly to share events. */
				if ((!m->modifier && n->modifier) ||
				    (m->modifier && !n->modifier) ||
				    (m->modifier && n->modifier &&
				     strcmp(m->modifier, n->modifier)))
					continue;

				/* PMUs must match exactly to share events. */
				if ((!m->pmu && n->pmu) ||
				    (m->pmu && !n->pmu) ||
				    (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
					continue;

				if (expr__subset_of_ids(n->pctx, m->pctx)) {
					pr_debug("Events in '%s' fully contained within '%s'\n",
						 m->metric_name, n->metric_name);
					metric_evlist = n->evlist;
					break;
				}

			}
		}
		if (!metric_evlist) {
			/* No sharing possible: parse this metric's own events. */
			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
					m->group_events, tool_events, &m->evlist);
			if (ret)
				goto out;

			metric_evlist = m->evlist;
		}
		ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
					  metric_evlist, &metric_events);
		if (ret) {
			pr_err("Cannot resolve IDs for %s: %s\n",
			       m->metric_name, m->metric_expr);
			goto out;
		}

		me = metricgroup__lookup(&perf_evlist->metric_events,
					 pick_display_evsel(&metric_list, metric_events),
					 /*create=*/true);
		if (!me) {
			/*
			 * Fix: lookup with create=true can still return NULL
			 * when rblist node allocation fails; previously this
			 * was dereferenced below unconditionally.
			 */
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}

		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}

		/* Transfer ownership of metric_refs from the metric to expr. */
		expr->metric_refs = m->metric_refs;
		m->metric_refs = NULL;
		expr->metric_expr = m->metric_expr;
		if (m->modifier) {
			char *tmp;

			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
				expr->metric_name = NULL;
			else
				expr->metric_name = tmp;
		} else
			expr->metric_name = strdup(m->metric_name);

		if (!expr->metric_name) {
			ret = -ENOMEM;
			free(expr);
			free(metric_events);
			goto out;
		}
		if (m->default_show_events) {
			struct evsel *pos;

			/* Propagate the show flag to the events and their group members. */
			for (int i = 0; metric_events[i]; i++)
				metric_events[i]->default_show_events = true;
			evlist__for_each_entry(metric_evlist, pos) {
				if (pos->metric_leader && pos->metric_leader->default_show_events)
					pos->default_show_events = true;
			}
		}
		expr->metric_threshold = m->metric_threshold;
		expr->metric_unit = m->metric_unit;
		expr->metric_events = metric_events;
		expr->runtime = m->pctx->sctx.runtime;
		expr->default_metricgroup_name = m->default_metricgroup_name;
		me->is_default = is_default;
		list_add(&expr->nd, &me->head);
	}


	if (combined_evlist) {
		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
		evlist__delete(combined_evlist);
	}

	/* Move all per-metric events onto the output evlist. */
	list_for_each_entry(m, &metric_list, nd) {
		if (m->evlist)
			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
	}

out:
	metricgroup__free_metrics(&metric_list);
	return ret;
}
1552
/* Public entry point: parse metrics/groups with the architecture's table. */
int metricgroup__parse_groups(struct evlist *perf_evlist,
			      const char *pmu,
			      const char *str,
			      bool metric_no_group,
			      bool metric_no_merge,
			      bool metric_no_threshold,
			      const char *user_requested_cpu_list,
			      bool system_wide,
			      bool hardware_aware_grouping)
{
	const struct pmu_metrics_table *table;

	if (hardware_aware_grouping)
		pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n");

	table = pmu_metrics_table__find();
	return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
			    metric_no_threshold, user_requested_cpu_list, system_wide,
			    /*fake_pmu=*/false, table);
}
1572
metricgroup__parse_groups_test(struct evlist * evlist,const struct pmu_metrics_table * table,const char * str)1573 int metricgroup__parse_groups_test(struct evlist *evlist,
1574 const struct pmu_metrics_table *table,
1575 const char *str)
1576 {
1577 return parse_groups(evlist, "all", str,
1578 /*metric_no_group=*/false,
1579 /*metric_no_merge=*/false,
1580 /*metric_no_threshold=*/false,
1581 /*user_requested_cpu_list=*/NULL,
1582 /*system_wide=*/false,
1583 /*fake_pmu=*/true, table);
1584 }
1585
/* Closure for metricgroup__has_metric_or_groups_callback(). */
struct metricgroup__has_metric_data {
	const char *pmu;
	const char *metric_or_groups;
};
/*
 * Return 1 (stopping the table walk) when @pm matches the requested PMU and
 * metric or group name, 0 to keep iterating.
 */
static int metricgroup__has_metric_or_groups_callback(const struct pmu_metric *pm,
						      const struct pmu_metrics_table *table
						      __maybe_unused,
						      void *vdata)
{
	struct metricgroup__has_metric_data *data = vdata;

	return match_pm_metric_or_groups(pm, data->pmu, data->metric_or_groups) ? 1 : 0;
}
1599
metricgroup__has_metric_or_groups(const char * pmu,const char * metric_or_groups)1600 bool metricgroup__has_metric_or_groups(const char *pmu, const char *metric_or_groups)
1601 {
1602 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1603 struct metricgroup__has_metric_data data = {
1604 .pmu = pmu,
1605 .metric_or_groups = metric_or_groups,
1606 };
1607
1608 return pmu_metrics_table__for_each_metric(table,
1609 metricgroup__has_metric_or_groups_callback,
1610 &data)
1611 ? true : false;
1612 }
1613
/*
 * Per-metric callback: if the metric's group contains "TopdownL<digit>",
 * raise *max_level to that digit. Always returns 0 to keep iterating.
 */
static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
						   const struct pmu_metrics_table *table __maybe_unused,
						   void *data)
{
	unsigned int *max_level = data;
	unsigned int level;
	const char *p = strstr(pm->metric_group ?: "", "TopdownL");

	/*
	 * Require an actual digit after "TopdownL"; previously any non-NUL
	 * character was accepted and 'p[8] - '0'' produced a garbage level.
	 * !isdigit() also covers the '\0' case.
	 */
	if (!p || !isdigit(p[8]))
		return 0;

	level = p[8] - '0';
	if (level > *max_level)
		*max_level = level;

	return 0;
}
1631
metricgroups__topdown_max_level(void)1632 unsigned int metricgroups__topdown_max_level(void)
1633 {
1634 unsigned int max_level = 0;
1635 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1636
1637 if (!table)
1638 return false;
1639
1640 pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
1641 &max_level);
1642 return max_level;
1643 }
1644
metricgroup__copy_metric_events(struct evlist * evlist,struct cgroup * cgrp,struct rblist * new_metric_events,struct rblist * old_metric_events)1645 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1646 struct rblist *new_metric_events,
1647 struct rblist *old_metric_events)
1648 {
1649 unsigned int i;
1650
1651 for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1652 struct rb_node *nd;
1653 struct metric_event *old_me, *new_me;
1654 struct metric_expr *old_expr, *new_expr;
1655 struct evsel *evsel;
1656 size_t alloc_size;
1657 int idx, nr;
1658
1659 nd = rblist__entry(old_metric_events, i);
1660 old_me = container_of(nd, struct metric_event, nd);
1661
1662 evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1663 if (!evsel)
1664 return -EINVAL;
1665 new_me = metricgroup__lookup(new_metric_events, evsel, /*create=*/true);
1666 if (!new_me)
1667 return -ENOMEM;
1668
1669 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1670 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1671
1672 new_me->is_default = old_me->is_default;
1673 list_for_each_entry(old_expr, &old_me->head, nd) {
1674 new_expr = malloc(sizeof(*new_expr));
1675 if (!new_expr)
1676 return -ENOMEM;
1677
1678 new_expr->metric_expr = old_expr->metric_expr;
1679 new_expr->metric_threshold = old_expr->metric_threshold;
1680 new_expr->metric_name = strdup(old_expr->metric_name);
1681 if (!new_expr->metric_name)
1682 return -ENOMEM;
1683
1684 new_expr->metric_unit = old_expr->metric_unit;
1685 new_expr->runtime = old_expr->runtime;
1686 new_expr->default_metricgroup_name = old_expr->default_metricgroup_name;
1687
1688 if (old_expr->metric_refs) {
1689 /* calculate number of metric_events */
1690 for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1691 continue;
1692 alloc_size = sizeof(*new_expr->metric_refs);
1693 new_expr->metric_refs = calloc(nr + 1, alloc_size);
1694 if (!new_expr->metric_refs) {
1695 free(new_expr);
1696 return -ENOMEM;
1697 }
1698
1699 memcpy(new_expr->metric_refs, old_expr->metric_refs,
1700 nr * alloc_size);
1701 } else {
1702 new_expr->metric_refs = NULL;
1703 }
1704
1705 /* calculate number of metric_events */
1706 for (nr = 0; old_expr->metric_events[nr]; nr++)
1707 continue;
1708 alloc_size = sizeof(*new_expr->metric_events);
1709 new_expr->metric_events = calloc(nr + 1, alloc_size);
1710 if (!new_expr->metric_events) {
1711 zfree(&new_expr->metric_refs);
1712 free(new_expr);
1713 return -ENOMEM;
1714 }
1715
1716 /* copy evsel in the same position */
1717 for (idx = 0; idx < nr; idx++) {
1718 evsel = old_expr->metric_events[idx];
1719 evsel = evlist__find_evsel(evlist, evsel->core.idx);
1720 if (evsel == NULL) {
1721 zfree(&new_expr->metric_events);
1722 zfree(&new_expr->metric_refs);
1723 free(new_expr);
1724 return -EINVAL;
1725 }
1726 new_expr->metric_events[idx] = evsel;
1727 }
1728
1729 list_add(&new_expr->nd, &new_me->head);
1730 }
1731 }
1732 return 0;
1733 }
1734