1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2017, Intel Corporation.
4 */
5
6 /* Manage metrics and groups of metrics from JSON files */
7
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "pmus.h"
15 #include "print-events.h"
16 #include "smt.h"
17 #include "tool_pmu.h"
18 #include "expr.h"
19 #include "rblist.h"
20 #include <string.h>
21 #include <errno.h>
22 #include "strlist.h"
23 #include <assert.h>
24 #include <linux/ctype.h>
25 #include <linux/list_sort.h>
26 #include <linux/string.h>
27 #include <linux/zalloc.h>
28 #include <perf/cpumap.h>
29 #include <subcmd/parse-options.h>
30 #include <api/fs/fs.h>
31 #include "util.h"
32 #include <asm/bug.h>
33 #include "cgroup.h"
34 #include "util/hashmap.h"
35
/*
 * Find the metric_event node for an evsel, keying on the evsel's metric
 * leader when it has one. When @create is true and no node exists, one is
 * inserted before the lookup is retried.
 */
struct metric_event *metricgroup__lookup(struct rblist *metric_events,
					 struct evsel *evsel,
					 bool create)
{
	struct metric_event key = {
		.evsel = evsel && evsel->metric_leader ? evsel->metric_leader : evsel,
	};
	struct rb_node *node;

	if (!metric_events)
		return NULL;

	node = rblist__find(metric_events, &key);
	if (!node && create) {
		rblist__add_node(metric_events, &key);
		node = rblist__find(metric_events, &key);
	}
	return node ? container_of(node, struct metric_event, nd) : NULL;
}
61
metric_event_cmp(struct rb_node * rb_node,const void * entry)62 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
63 {
64 struct metric_event *a = container_of(rb_node,
65 struct metric_event,
66 nd);
67 const struct metric_event *b = entry;
68
69 if (a->evsel == b->evsel)
70 return 0;
71 if ((char *)a->evsel < (char *)b->evsel)
72 return -1;
73 return +1;
74 }
75
/*
 * rblist node constructor: heap-allocate a copy of the stack-allocated
 * template entry used during lookup (see metricgroup__lookup).
 */
static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(*me));

	if (!me)
		return NULL;
	/*
	 * memcpy copies every field including the evsel key, so no separate
	 * evsel assignment is needed (the original re-assigned it redundantly).
	 */
	memcpy(me, entry, sizeof(*me));
	me->is_default = false;
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}
89
/* rblist node destructor: free every metric_expr on the node, then the node. */
static void metric_event_delete(struct rblist *rblist __maybe_unused,
				struct rb_node *rb_node)
{
	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
	struct metric_expr *expr, *next;

	list_for_each_entry_safe(expr, next, &me->head, nd) {
		zfree(&expr->metric_name);
		zfree(&expr->metric_refs);
		zfree(&expr->metric_events);
		free(expr);
	}
	free(me);
}
105
/* Initialize an rblist mapping a metric leader evsel to its metric_event. */
void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
	metric_events->node_delete = metric_event_delete;
}
113
/* Release all nodes in a metric_events rblist (via metric_event_delete). */
void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}
118
/**
 * struct metric - The metric under construction. The data held here will be
 * placed in a metric_expr.
 */
struct metric {
	/** Linkage into the list of metrics being built. */
	struct list_head nd;
	/**
	 * The expression parse context importantly holding the IDs contained
	 * within the expression.
	 */
	struct expr_parse_ctx *pctx;
	/** The PMU the metric applies to; "cpu" when the JSON gives none. */
	const char *pmu;
	/** The name of the metric such as "IPC". */
	const char *metric_name;
	/** Modifier on the metric such as "u" or NULL for none. */
	const char *modifier;
	/** The expression to parse, for example, "instructions/cycles". */
	const char *metric_expr;
	/** Optional threshold expression where zero value is green, otherwise red. */
	const char *metric_threshold;
	/**
	 * The "ScaleUnit" that scales and adds a unit to the metric during
	 * output.
	 */
	const char *metric_unit;
	/**
	 * Optional name of the metric group reported
	 * if the Default metric group is being processed.
	 */
	const char *default_metricgroup_name;
	/** Optional null terminated array of referenced metrics. */
	struct metric_ref *metric_refs;
	/**
	 * Should events of the metric be grouped? May be vetoed per metric by
	 * metric__group_events().
	 */
	bool group_events;
	/** Show events even if in the Default metric group. */
	bool default_show_events;
	/**
	 * Parsed events for the metric. Optional as events may be taken from a
	 * different metric whose group contains all the IDs necessary for this
	 * one.
	 */
	struct evlist *evlist;
};
164
/*
 * Warn that @name's events are ungrouped (header), or print the NMI watchdog
 * advice once at the end (@foot) if any metric hit the constraint.
 */
static void metric__watchdog_constraint_hint(const char *name, bool foot)
{
	/* Sticky across calls: did any metric trip the constraint? */
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Not grouping metric %s's events.\n", name);
		violate_nmi_constraint = true;
	} else if (violate_nmi_constraint) {
		pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
			   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
			   "    perf stat ...\n"
			   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
	}
}
183
/*
 * Should the metric's events be placed in a "{}" group? Grouping may be
 * disallowed by the metric's event_grouping constraint, the NMI watchdog
 * being active, or SMT being on.
 */
static bool metric__group_events(const struct pmu_metric *pm, bool metric_no_threshold)
{
	switch (pm->event_grouping) {
	case MetricNoGroupEvents:
		return false;
	case MetricNoGroupEventsNmi:
		if (sysctl__nmi_watchdog_enabled()) {
			metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
			return false;
		}
		return true;
	case MetricNoGroupEventsSmt:
		return !smt_on();
	case MetricNoGroupEventsThresholdAndNmi:
		if (metric_no_threshold)
			return true;
		if (sysctl__nmi_watchdog_enabled()) {
			metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
			return false;
		}
		return true;
	case MetricGroupEvents:
	default:
		return true;
	}
}
208
metric__free(struct metric * m)209 static void metric__free(struct metric *m)
210 {
211 if (!m)
212 return;
213
214 zfree(&m->metric_refs);
215 expr__ctx_free(m->pctx);
216 zfree(&m->modifier);
217 evlist__delete(m->evlist);
218 free(m);
219 }
220
/*
 * Allocate and initialize a metric from the JSON metric pm. The modifier and
 * user-requested CPU list are duplicated; strings taken from pm are used
 * as-is (assumed to outlive the metric — see the metric_refs comment in
 * __add_metric). Returns NULL on allocation failure.
 */
static struct metric *metric__new(const struct pmu_metric *pm,
				  const char *modifier,
				  bool metric_no_group,
				  bool metric_no_threshold,
				  int runtime,
				  const char *user_requested_cpu_list,
				  bool system_wide)
{
	struct metric *m;

	m = zalloc(sizeof(*m));
	if (!m)
		return NULL;

	m->pctx = expr__ctx_new();
	if (!m->pctx)
		goto out_err;

	m->pmu = pm->pmu ?: "cpu";
	m->metric_name = pm->metric_name;
	m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
	m->modifier = NULL;
	if (modifier) {
		/* Command-line string; copy so the metric owns it. */
		m->modifier = strdup(modifier);
		if (!m->modifier)
			goto out_err;
	}
	m->metric_expr = pm->metric_expr;
	m->metric_threshold = pm->metric_threshold;
	m->metric_unit = pm->unit;
	m->pctx->sctx.user_requested_cpu_list = NULL;
	if (user_requested_cpu_list) {
		m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
		if (!m->pctx->sctx.user_requested_cpu_list)
			goto out_err;
	}
	m->pctx->sctx.runtime = runtime;
	m->pctx->sctx.system_wide = system_wide;
	/* Grouping may be vetoed per metric, e.g. by the NMI watchdog constraint. */
	m->group_events = !metric_no_group && metric__group_events(pm, metric_no_threshold);
	m->default_show_events = pm->default_show_events;
	m->metric_refs = NULL;
	m->evlist = NULL;

	return m;
out_err:
	/* metric__free handles the partially initialized metric. */
	metric__free(m);
	return NULL;
}
269
/* Is metric_id already present among the first num_events entries? */
static bool contains_metric_id(struct evsel **metric_events, int num_events,
			       const char *metric_id)
{
	/* Linear scan; the arrays involved are small. */
	for (int idx = 0; idx < num_events; idx++) {
		if (strcmp(evsel__metric_id(metric_events[idx]), metric_id) == 0)
			return true;
	}
	return false;
}
281
/**
 * setup_metric_events - Find a group of events in metric_evlist that correspond
 *                       to the IDs from a parsed metric expression.
 * @pmu: The PMU for the IDs.
 * @ids: the metric IDs to match.
 * @metric_evlist: the list of perf events.
 * @out_metric_events: holds the created metric events array.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL when not every
 * ID could be matched to an event.
 */
static int setup_metric_events(const char *pmu, struct hashmap *ids,
			       struct evlist *metric_evlist,
			       struct evsel ***out_metric_events)
{
	struct evsel **metric_events;
	const char *metric_id;
	struct evsel *ev;
	size_t ids_size, matched_events, i;
	bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);

	*out_metric_events = NULL;
	ids_size = hashmap__size(ids);

	/* One slot per ID plus a NULL terminator. */
	metric_events = calloc(ids_size + 1, sizeof(void *));
	if (!metric_events)
		return -ENOMEM;

	matched_events = 0;
	evlist__for_each_entry(metric_evlist, ev) {
		struct expr_id_data *val_ptr;

		/* Don't match events for the wrong hybrid PMU. */
		if (!all_pmus && ev->pmu && evsel__is_hybrid(ev) &&
		    strcmp(ev->pmu->name, pmu))
			continue;
		/*
		 * Check for duplicate events with the same name. For
		 * example, uncore_imc/cas_count_read/ will turn into 6
		 * events per socket on skylakex. Only the first such
		 * event is placed in metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		if (contains_metric_id(metric_events, matched_events, metric_id))
			continue;
		/*
		 * Does this event belong to the parse context? For
		 * combined or shared groups, this metric may not care
		 * about this event.
		 */
		if (hashmap__find(ids, metric_id, &val_ptr)) {
			pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
			metric_events[matched_events++] = ev;

			/* Stop early once every ID has an event. */
			if (matched_events >= ids_size)
				break;
		}
	}
	if (matched_events < ids_size) {
		/* Some ID has no event: the metric can't be computed. */
		free(metric_events);
		return -EINVAL;
	}
	for (i = 0; i < ids_size; i++) {
		ev = metric_events[i];
		ev->collect_stat = true;

		/*
		 * The metric leader points to the identically named
		 * event in metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same
		 * group (or globally) as being in use as uncore events
		 * may be duplicated for each pmu. Set the metric leader
		 * of such events to be the event that appears in
		 * metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		evlist__for_each_entry_continue(metric_evlist, ev) {
			if (!strcmp(evsel__metric_id(ev), metric_id))
				ev->metric_leader = metric_events[i];
		}
	}
	*out_metric_events = metric_events;
	return 0;
}
366
/*
 * Does 'sought' match any semicolon-separated entry in metric_or_groups?
 * "all" matches everything; a NULL list matches only the special name
 * "No_group". Entry comparison is case insensitive.
 */
static bool match_metric_or_groups(const char *metric_or_groups, const char *sought)
{
	size_t len;
	const char *pos;

	if (!sought)
		return false;
	if (!strcmp(sought, "all"))
		return true;
	if (!metric_or_groups)
		return !strcasecmp(sought, "No_group");

	len = strlen(sought);
	for (pos = metric_or_groups; pos; ) {
		if (!strncasecmp(pos, sought, len) &&
		    (pos[len] == '\0' || pos[len] == ';'))
			return true;
		pos = strchr(pos, ';');
		if (pos)
			pos++;
	}
	return false;
}
385
/*
 * Does metric pm apply to PMU @pmu (or "all"), and does @metric_or_groups
 * match either the metric's group list or its name?
 */
static bool match_pm_metric_or_groups(const struct pmu_metric *pm, const char *pmu,
				      const char *metric_or_groups)
{
	const char *pm_pmu = pm->pmu ?: "cpu";
	struct perf_pmu *perf_pmu = NULL;

	if (pm->pmu)
		perf_pmu = perf_pmus__find(pm->pmu);

	/*
	 * Reject on PMU mismatch unless a wildcard name match succeeds.
	 * NOTE(review): when pm->pmu names a PMU that isn't present
	 * (perf_pmu == NULL) the mismatch check is bypassed and the metric
	 * stays a candidate — confirm this is the intended behavior.
	 */
	if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu) &&
	    (perf_pmu && !perf_pmu__name_wildcard_match(perf_pmu, pmu)))
		return false;

	return match_metric_or_groups(pm->metric_group, metric_or_groups) ||
	       match_metric_or_groups(pm->metric_name, metric_or_groups);
}
402
/* Closure forwarded through pmu_for_each_sys_metric to metricgroup__sys_event_iter. */
struct metricgroup_iter_data {
	pmu_metric_iter_fn fn;	/* Callback invoked for each matching metric. */
	void *data;		/* Opaque argument passed through to fn. */
};
407
metricgroup__sys_event_iter(const struct pmu_metric * pm,const struct pmu_metrics_table * table,void * data)408 static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
409 const struct pmu_metrics_table *table,
410 void *data)
411 {
412 struct metricgroup_iter_data *d = data;
413 struct perf_pmu *pmu = NULL;
414
415 if (!pm->metric_expr || !pm->compat)
416 return 0;
417
418 while ((pmu = perf_pmus__scan(pmu))) {
419
420 if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
421 continue;
422
423 return d->fn(pm, table, d->data);
424 }
425 return 0;
426 }
427
/*
 * Iterate fn over the metrics in @table (which may be NULL), the default
 * metrics table, and every system metric whose compat string matches a
 * present PMU. Stops at the first non-zero return from fn.
 */
int metricgroup__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
				 void *data)
{
	struct metricgroup_iter_data sys_data = {
		.fn = fn,
		.data = data,
	};
	const struct pmu_metrics_table *tables[2] = {
		table,
		pmu_metrics_table__default(),
	};

	for (size_t i = 0; i < ARRAY_SIZE(tables); i++) {
		int ret;

		/* Either table may be absent. */
		if (!tables[i])
			continue;

		ret = pmu_metrics_table__for_each_metric(tables[i], fn, data);
		if (ret)
			return ret;
	}

	return pmu_for_each_sys_metric(metricgroup__sys_event_iter, &sys_data);
}
453
/*
 * Characters replaced in metric-ids because parse-events can't parse them;
 * each is encoded as '!' followed by its index in this string as an ASCII
 * digit (see encode_metric_id/decode_metric_id).
 */
static const char *code_characters = ",-=@";
455
encode_metric_id(struct strbuf * sb,const char * x)456 static int encode_metric_id(struct strbuf *sb, const char *x)
457 {
458 int ret = 0;
459
460 for (; *x; x++) {
461 const char *c = strchr(code_characters, *x);
462 if (c) {
463 ret = strbuf_addch(sb, '!');
464 if (ret)
465 break;
466
467 ret = strbuf_addch(sb, '0' + (c - code_characters));
468 if (ret)
469 break;
470 } else {
471 ret = strbuf_addch(sb, *x);
472 if (ret)
473 break;
474 }
475 }
476 return ret;
477 }
478
decode_metric_id(struct strbuf * sb,const char * x)479 static int decode_metric_id(struct strbuf *sb, const char *x)
480 {
481 const char *orig = x;
482 size_t i;
483 char c;
484 int ret;
485
486 for (; *x; x++) {
487 c = *x;
488 if (*x == '!') {
489 x++;
490 i = *x - '0';
491 if (i > strlen(code_characters)) {
492 pr_err("Bad metric-id encoding in: '%s'", orig);
493 return -1;
494 }
495 c = code_characters[i];
496 }
497 ret = strbuf_addch(sb, c);
498 if (ret)
499 return ret;
500 }
501 return 0;
502 }
503
/*
 * Rewrite every evsel's metric-id in perf_evlist back to its original form
 * (undoing encode_metric_id) and, where the event name is just the parsed
 * event, replace the name with a friendlier version derived from the
 * decoded id plus any modifier.
 */
static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
{
	struct evsel *ev;
	struct strbuf sb = STRBUF_INIT;
	char *cur;
	int ret = 0;

	evlist__for_each_entry(perf_evlist, ev) {
		if (!ev->metric_id)
			continue;

		/* Reuse one scratch buffer, reset per event. */
		ret = strbuf_setlen(&sb, 0);
		if (ret)
			break;

		ret = decode_metric_id(&sb, ev->metric_id);
		if (ret)
			break;

		free((char *)ev->metric_id);
		ev->metric_id = strdup(sb.buf);
		if (!ev->metric_id) {
			ret = -ENOMEM;
			break;
		}
		/*
		 * If the name is just the parsed event, use the metric-id to
		 * give a more friendly display version.
		 */
		if (strstr(ev->name, "metric-id=")) {
			bool has_slash = false;

			zfree(&ev->name);
			/* Turn pmu@event@ style ids back into pmu/event/ form. */
			for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
				*cur = '/';
				has_slash = true;
			}

			if (modifier) {
				/* Need a ':' separator unless one (or a '/') exists. */
				if (!has_slash && !strchr(sb.buf, ':')) {
					ret = strbuf_addch(&sb, ':');
					if (ret)
						break;
				}
				ret = strbuf_addstr(&sb, modifier);
				if (ret)
					break;
			}
			ev->name = strdup(sb.buf);
			if (!ev->name) {
				ret = -ENOMEM;
				break;
			}
		}
	}
	strbuf_release(&sb);
	return ret;
}
562
/*
 * Build a parse-events string for all the IDs in ctx. When group_events is
 * set the events are wrapped in a "{}" group (closed with ":W" for weak
 * grouping); tool events are always emitted outside the group. Each event
 * carries a metric-id term holding its encoded original ID so it can be
 * matched back after parsing (see decode_all_metric_ids).
 */
static int metricgroup__build_event_string(struct strbuf *events,
					   const struct expr_parse_ctx *ctx,
					   const char *modifier,
					   bool group_events)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool no_group = true, has_tool_events = false;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	int ret = 0;

#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)

	hashmap__for_each_entry(ctx->ids, cur, bkt) {
		const char *sep, *rsep, *id = cur->pkey;
		enum tool_pmu_event ev;

		pr_debug("found event %s\n", id);

		/* Always move tool events outside of the group. */
		ev = tool_pmu__str_to_event(id);
		if (ev != TOOL_PMU__EVENT_NONE) {
			has_tool_events = true;
			tool_events[ev] = true;
			continue;
		}
		/* Separate events with commas and open the group if necessary. */
		if (no_group) {
			if (group_events) {
				ret = strbuf_addch(events, '{');
				RETURN_IF_NON_ZERO(ret);
			}

			no_group = false;
		} else {
			ret = strbuf_addch(events, ',');
			RETURN_IF_NON_ZERO(ret);
		}
		/*
		 * Encode the ID as an event string. Add a qualifier for
		 * metric_id that is the original name except with characters
		 * that parse-events can't parse replaced. For example,
		 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
		 */
		sep = strchr(id, '@');
		if (sep != NULL) {
			/* pmu@terms@ form: emit "pmu/terms,metric-id=". */
			ret = strbuf_add(events, id, sep - id);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addch(events, '/');
			RETURN_IF_NON_ZERO(ret);
			rsep = strrchr(sep, '@');
			ret = strbuf_add(events, sep + 1, rsep - sep - 1);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addstr(events, ",metric-id=");
			RETURN_IF_NON_ZERO(ret);
			sep = rsep;
		} else {
			/* Plain name, possibly with ":modifiers" after it. */
			sep = strchr(id, ':');
			if (sep != NULL) {
				ret = strbuf_add(events, id, sep - id);
				RETURN_IF_NON_ZERO(ret);
			} else {
				ret = strbuf_addstr(events, id);
				RETURN_IF_NON_ZERO(ret);
			}
			ret = strbuf_addstr(events, "/metric-id=");
			RETURN_IF_NON_ZERO(ret);
		}
		ret = encode_metric_id(events, id);
		RETURN_IF_NON_ZERO(ret);
		ret = strbuf_addstr(events, "/");
		RETURN_IF_NON_ZERO(ret);

		/* Re-append anything that followed the '@'/':' separator. */
		if (sep != NULL) {
			ret = strbuf_addstr(events, sep + 1);
			RETURN_IF_NON_ZERO(ret);
		}
		if (modifier) {
			ret = strbuf_addstr(events, modifier);
			RETURN_IF_NON_ZERO(ret);
		}
	}
	if (!no_group && group_events) {
		ret = strbuf_addf(events, "}:W");
		RETURN_IF_NON_ZERO(ret);
	}
	if (has_tool_events) {
		int i;

		/* Tool events follow the group, comma separated. */
		tool_pmu__for_each_event(i) {
			if (tool_events[i]) {
				if (!no_group) {
					ret = strbuf_addch(events, ',');
					RETURN_IF_NON_ZERO(ret);
				}
				no_group = false;
				ret = strbuf_addstr(events, tool_pmu__event_to_str(i));
				RETURN_IF_NON_ZERO(ret);
			}
		}
	}

	return ret;
#undef RETURN_IF_NON_ZERO
}
668
/*
 * How many times should a metric whose expression contains the '?' runtime
 * parameter be instantiated (see add_metric)? Weak definition returning 1 so
 * architectures can override it.
 */
int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
{
	return 1;
}
673
/*
 * A singly linked list on the stack of the names of metrics being
 * processed. Used to identify recursion.
 */
struct visited_metric {
	/* Name of the metric being added at this level of the tree. */
	const char *name;
	/* The metric that referenced this one; NULL at the root. */
	const struct visited_metric *parent;
};
682
/* Arguments bundled for iterator callbacks that add a metric. */
struct metricgroup_add_iter_data {
	struct list_head *metric_list;	/* List the metric is added to. */
	const char *pmu;		/* PMU name, or "all" for all PMUs. */
	const char *metric_name;	/* Metric or metric group name sought. */
	const char *modifier;		/* If non-NULL, event modifiers like "u". */
	int *ret;			/* Out: result of adding the metric. */
	bool *has_match;		/* Out: did any metric match? */
	bool metric_no_group;		/* Avoid "{}" grouping of events? */
	bool metric_no_threshold;	/* Ignore threshold expressions? */
	const char *user_requested_cpu_list;	/* Command line specified CPUs. */
	bool system_wide;		/* Are events for all processes recorded? */
	struct metric *root_metric;	/* Root of the metric tree; NULL when adding a root. */
	const struct visited_metric *visited;	/* Recursion-detection list. */
	const struct pmu_metrics_table *table;	/* Table searched for metrics. */
};
698
/* Forward declaration: add_metric() and resolve_metric() are mutually recursive. */
static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table);
709
/* Table-search callback: copy the found metric out to the caller's buffer. */
static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
					     const struct pmu_metrics_table *table __maybe_unused,
					     void *vdata)
{
	struct pmu_metric *copied_pm = vdata;

	*copied_pm = *pm;
	return 0;
}
719
720 /**
721 * resolve_metric - Locate metrics within the root metric and recursively add
722 * references to them.
723 * @metric_list: The list the metric is added to.
724 * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
725 * @modifier: if non-null event modifiers like "u".
726 * @metric_no_group: Should events written to events be grouped "{}" or
727 * global. Grouping is the default but due to multiplexing the
728 * user may override.
729 * @user_requested_cpu_list: Command line specified CPUs to record on.
730 * @system_wide: Are events for all processes recorded.
731 * @root_metric: Metrics may reference other metrics to form a tree. In this
732 * case the root_metric holds all the IDs and a list of referenced
733 * metrics. When adding a root this argument is NULL.
734 * @visited: A singly linked list of metric names being added that is used to
735 * detect recursion.
736 * @table: The table that is searched for metrics, most commonly the table for the
737 * architecture perf is running upon.
738 */
static int resolve_metric(struct list_head *metric_list,
			  struct perf_pmu *pmu,
			  const char *modifier,
			  bool metric_no_group,
			  bool metric_no_threshold,
			  const char *user_requested_cpu_list,
			  bool system_wide,
			  struct metric *root_metric,
			  const struct visited_metric *visited,
			  const struct pmu_metrics_table *table)
{
	struct hashmap_entry *cur;
	size_t bkt;
	struct to_resolve {
		/* The metric to resolve. */
		struct pmu_metric pm;
		/*
		 * The key in the IDs map, this may differ from in case,
		 * etc. from pm->metric_name.
		 */
		const char *key;
	} *pending = NULL;
	int i, ret = 0, pending_cnt = 0;

	/*
	 * Iterate all the parsed IDs and if there's a matching metric add it
	 * to the pending array.
	 */
	hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
		struct pmu_metric pm;

		if (pmu_metrics_table__find_metric(table, pmu, cur->pkey,
						   metricgroup__find_metric_callback,
						   &pm) != PMU_METRICS__NOT_FOUND) {
			struct to_resolve *tmp;

			/*
			 * Grow via a temporary so the existing array isn't
			 * leaked when realloc fails.
			 */
			tmp = realloc(pending, (pending_cnt + 1) * sizeof(*pending));
			if (!tmp) {
				free(pending);
				return -ENOMEM;
			}
			pending = tmp;

			memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
			pending[pending_cnt].key = cur->pkey;
			pending_cnt++;
		}
	}

	/* Remove the metric IDs from the context. */
	for (i = 0; i < pending_cnt; i++)
		expr__del_id(root_metric->pctx, pending[i].key);

	/*
	 * Recursively add all the metrics, IDs are added to the root metric's
	 * context.
	 */
	for (i = 0; i < pending_cnt; i++) {
		ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
				 metric_no_threshold, user_requested_cpu_list, system_wide,
				 root_metric, visited, table);
		if (ret)
			break;
	}

	free(pending);
	return ret;
}
803
804 /**
805 * __add_metric - Add a metric to metric_list.
806 * @metric_list: The list the metric is added to.
807 * @pm: The pmu_metric containing the metric to be added.
808 * @modifier: if non-null event modifiers like "u".
809 * @metric_no_group: Should events written to events be grouped "{}" or
810 * global. Grouping is the default but due to multiplexing the
811 * user may override.
812 * @metric_no_threshold: Should threshold expressions be ignored?
813 * @runtime: A special argument for the parser only known at runtime.
814 * @user_requested_cpu_list: Command line specified CPUs to record on.
815 * @system_wide: Are events for all processes recorded.
816 * @root_metric: Metrics may reference other metrics to form a tree. In this
817 * case the root_metric holds all the IDs and a list of referenced
818 * metrics. When adding a root this argument is NULL.
819 * @visited: A singly linked list of metric names being added that is used to
820 * detect recursion.
821 * @table: The table that is searched for metrics, most commonly the table for the
822 * architecture perf is running upon.
823 */
static int __add_metric(struct list_head *metric_list,
			const struct pmu_metric *pm,
			const char *modifier,
			bool metric_no_group,
			bool metric_no_threshold,
			int runtime,
			const char *user_requested_cpu_list,
			bool system_wide,
			struct metric *root_metric,
			const struct visited_metric *visited,
			const struct pmu_metrics_table *table)
{
	const struct visited_metric *vm;
	int ret;
	bool is_root = !root_metric;
	const char *expr;
	struct visited_metric visited_node = {
		.name = pm->metric_name,
		.parent = visited,
	};

	/* Fail rather than loop forever on mutually referencing metrics. */
	for (vm = visited; vm; vm = vm->parent) {
		if (!strcmp(pm->metric_name, vm->name)) {
			pr_err("failed: recursion detected for %s\n", pm->metric_name);
			return -1;
		}
	}

	if (is_root) {
		/*
		 * This metric is the root of a tree and may reference other
		 * metrics that are added recursively.
		 */
		root_metric = metric__new(pm, modifier, metric_no_group, metric_no_threshold,
					  runtime, user_requested_cpu_list, system_wide);
		if (!root_metric)
			return -ENOMEM;

	} else {
		int cnt = 0;
		struct metric_ref *new_refs;

		/*
		 * This metric was referenced in a metric higher in the
		 * tree. Check if the same metric is already resolved in the
		 * metric_refs list.
		 */
		if (root_metric->metric_refs) {
			for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
				if (!strcmp(pm->metric_name,
					    root_metric->metric_refs[cnt].metric_name))
					return 0;
			}
		}

		/*
		 * Create reference. Need space for the entry and the
		 * terminator. Grow via a temporary so the existing array
		 * isn't leaked when realloc fails.
		 */
		new_refs = realloc(root_metric->metric_refs,
				   (cnt + 2) * sizeof(struct metric_ref));
		if (!new_refs)
			return -ENOMEM;
		root_metric->metric_refs = new_refs;

		/*
		 * Intentionally passing just const char pointers,
		 * from 'pe' object, so they never go away. We don't
		 * need to change them, so there's no need to create
		 * our own copy.
		 */
		root_metric->metric_refs[cnt].metric_name = pm->metric_name;
		root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;

		/* Null terminate array. */
		root_metric->metric_refs[cnt+1].metric_name = NULL;
		root_metric->metric_refs[cnt+1].metric_expr = NULL;
	}

	/*
	 * For both the parent and referenced metrics, we parse
	 * all the metric's IDs and add it to the root context.
	 */
	ret = 0;
	expr = pm->metric_expr;
	if (is_root && pm->metric_threshold) {
		/*
		 * Threshold expressions are built off the actual metric. Switch
		 * to use that in case of additional necessary events. Change
		 * the visited node name to avoid this being flagged as
		 * recursion. If the threshold events are disabled, just use the
		 * metric's name as a reference. This allows metric threshold
		 * computation if there are sufficient events.
		 */
		assert(strstr(pm->metric_threshold, pm->metric_name));
		expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
		visited_node.name = "__threshold__";
	}

	ret = expr__find_ids(expr, NULL, root_metric->pctx);

	if (!ret) {
		/* Resolve referenced metrics. */
		struct perf_pmu *pmu;

		if (pm->pmu && pm->pmu[0] != '\0')
			pmu = perf_pmus__find(pm->pmu);
		else
			pmu = perf_pmus__scan_core(/*pmu=*/ NULL);

		ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
				     metric_no_threshold, user_requested_cpu_list,
				     system_wide, root_metric, &visited_node,
				     table);
	}
	if (ret) {
		/* Only the root is owned here; references belong to their root. */
		if (is_root)
			metric__free(root_metric);

	} else if (is_root)
		list_add(&root_metric->nd, metric_list);

	return ret;
}
943
/*
 * Add pm to metric_list, expanding it once per runtime-parameter value when
 * the expression contains '?'.
 */
static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table)
{
	int ret;

	pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);

	if (strstr(pm->metric_expr, "?")) {
		/*
		 * The expression takes a runtime parameter: create one metric
		 * per value and add each to metric_list.
		 */
		int count = arch_get_runtimeparam(pm);

		ret = 0;
		for (int i = 0; i < count && !ret; i++)
			ret = __add_metric(metric_list, pm, modifier, metric_no_group,
					   metric_no_threshold, i, user_requested_cpu_list,
					   system_wide, root_metric, visited, table);
	} else {
		ret = __add_metric(metric_list, pm, modifier, metric_no_group,
				   metric_no_threshold, 0, user_requested_cpu_list,
				   system_wide, root_metric, visited, table);
	}

	return ret;
}
981
982 /**
983 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
984 * the front. tool events are excluded from the count.
985 */
metric_list_cmp(void * priv __maybe_unused,const struct list_head * l,const struct list_head * r)986 static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
987 const struct list_head *r)
988 {
989 const struct metric *left = container_of(l, struct metric, nd);
990 const struct metric *right = container_of(r, struct metric, nd);
991 struct expr_id_data *data;
992 int i, left_count, right_count;
993
994 left_count = hashmap__size(left->pctx->ids);
995 tool_pmu__for_each_event(i) {
996 if (!expr__get_id(left->pctx, tool_pmu__event_to_str(i), &data))
997 left_count--;
998 }
999
1000 right_count = hashmap__size(right->pctx->ids);
1001 tool_pmu__for_each_event(i) {
1002 if (!expr__get_id(right->pctx, tool_pmu__event_to_str(i), &data))
1003 right_count--;
1004 }
1005
1006 return right_count - left_count;
1007 }
1008
1009 /**
1010 * default_metricgroup_cmp - Implements complex key for the Default metricgroup
1011 * that first sorts by default_metricgroup_name, then
1012 * metric_name.
1013 */
static int default_metricgroup_cmp(void *priv __maybe_unused,
				   const struct list_head *l,
				   const struct list_head *r)
{
	const struct metric *left = container_of(l, struct metric, nd);
	const struct metric *right = container_of(r, struct metric, nd);
	int cmp = strcmp(right->default_metricgroup_name,
			 left->default_metricgroup_name);

	/* Fall back to the metric name only when the group names tie. */
	return cmp ? cmp : strcmp(right->metric_name, left->metric_name);
}
1027
/*
 * State passed to metricgroup__add_metric_callback() while iterating over all
 * metrics in a pmu_metrics_table.
 */
struct metricgroup__add_metric_data {
	/* List that matching metrics are added to. */
	struct list_head *list;
	/* PMU name to match metrics against, or "all" for all PMUs. */
	const char *pmu;
	/* Name of the metric or metric group being searched for. */
	const char *metric_name;
	/* Event modifiers like "u", or NULL; passed through to add_metric(). */
	const char *modifier;
	/* Command line specified CPUs to record on; may be NULL. */
	const char *user_requested_cpu_list;
	/* Should events be grouped "{}" or global? */
	bool metric_no_group;
	/* Passed through to add_metric(); from the metric-no-threshold option. */
	bool metric_no_threshold;
	/* Are events for all processes recorded? */
	bool system_wide;
	/* Out: set true when at least one metric matched the search. */
	bool has_match;
};
1039
metricgroup__add_metric_callback(const struct pmu_metric * pm,const struct pmu_metrics_table * table,void * vdata)1040 static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
1041 const struct pmu_metrics_table *table,
1042 void *vdata)
1043 {
1044 struct metricgroup__add_metric_data *data = vdata;
1045 int ret = 0;
1046
1047 if (pm->metric_expr && match_pm_metric_or_groups(pm, data->pmu, data->metric_name)) {
1048 bool metric_no_group = data->metric_no_group ||
1049 match_metric_or_groups(pm->metricgroup_no_group, data->metric_name);
1050
1051 data->has_match = true;
1052 ret = add_metric(data->list, pm, data->modifier, metric_no_group,
1053 data->metric_no_threshold, data->user_requested_cpu_list,
1054 data->system_wide, /*root_metric=*/NULL,
1055 /*visited_metrics=*/NULL, table);
1056 }
1057 return ret;
1058 }
1059
1060 /**
1061 * metricgroup__add_metric - Find and add a metric, or a metric group.
1062 * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
1063 * @metric_name: The name of the metric or metric group. For example, "IPC"
1064 * could be the name of a metric and "TopDownL1" the name of a
1065 * metric group.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Should metric thresholds be ignored?
 * @user_requested_cpu_list: Command line specified CPUs to record on.
1071 * @system_wide: Are events for all processes recorded.
1072 * @metric_list: The list that the metric or metric group are added to.
1073 * @table: The table that is searched for metrics, most commonly the table for the
1074 * architecture perf is running upon.
1075 */
static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
				   bool metric_no_group, bool metric_no_threshold,
				   const char *user_requested_cpu_list,
				   bool system_wide,
				   struct list_head *metric_list,
				   const struct pmu_metrics_table *table)
{
	struct metricgroup__add_metric_data data = {
		.pmu = pmu,
		.metric_name = metric_name,
		.modifier = modifier,
		.metric_no_group = metric_no_group,
		.metric_no_threshold = metric_no_threshold,
		.user_requested_cpu_list = user_requested_cpu_list,
		.system_wide = system_wide,
		.has_match = false,
	};
	LIST_HEAD(list);
	int ret;

	data.list = &list;
	/*
	 * Iterate over all metrics seeing if metric matches either the
	 * name or group. When it does add the metric to the local list.
	 */
	ret = metricgroup__for_each_metric(table, metricgroup__add_metric_callback, &data);
	if (!ret && !data.has_match)
		ret = -ENOENT;

	/*
	 * Splice onto metric_list unconditionally so the caller can release
	 * the metrics even on failure.
	 */
	list_splice(&list, metric_list);
	return ret;
}
1112
1113 /**
1114 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
1115 * specified in a list.
1116 * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
1117 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
1118 * would match the IPC and CPI metrics, and TopDownL1 would match all
1119 * the metrics in the TopDownL1 group.
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Should metric thresholds be ignored?
 * @user_requested_cpu_list: Command line specified CPUs to record on.
1124 * @system_wide: Are events for all processes recorded.
1125 * @metric_list: The list that metrics are added to.
1126 * @table: The table that is searched for metrics, most commonly the table for the
1127 * architecture perf is running upon.
1128 */
static int metricgroup__add_metric_list(const char *pmu, const char *list,
					bool metric_no_group,
					bool metric_no_threshold,
					const char *user_requested_cpu_list,
					bool system_wide, struct list_head *metric_list,
					const struct pmu_metrics_table *table)
{
	char *list_itr, *list_copy, *metric_name, *modifier;
	/* Initialize ret: it is read after the loop even if no iteration set it. */
	int ret = 0, count = 0;

	/* strsep() modifies its argument, so work on a copy. */
	list_copy = strdup(list);
	if (!list_copy)
		return -ENOMEM;
	list_itr = list_copy;

	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
		/* Split an optional ":modifier" suffix off the metric name. */
		modifier = strchr(metric_name, ':');
		if (modifier)
			*modifier++ = '\0';

		ret = metricgroup__add_metric(pmu, metric_name, modifier,
					      metric_no_group, metric_no_threshold,
					      user_requested_cpu_list,
					      system_wide, metric_list, table);
		if (ret == -EINVAL)
			pr_err("Fail to parse metric or group `%s'\n", metric_name);
		else if (ret == -ENOENT)
			pr_err("Cannot find metric or group `%s'\n", metric_name);

		if (ret)
			break;

		count++;
	}
	free(list_copy);

	if (!ret) {
		/*
		 * Warn about nmi_watchdog if any parsed metrics had the
		 * NO_NMI_WATCHDOG constraint.
		 */
		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
		/* No metrics. */
		if (count == 0)
			return -EINVAL;
	}
	return ret;
}
1177
metricgroup__free_metrics(struct list_head * metric_list)1178 static void metricgroup__free_metrics(struct list_head *metric_list)
1179 {
1180 struct metric *m, *tmp;
1181
1182 list_for_each_entry_safe (m, tmp, metric_list, nd) {
1183 list_del_init(&m->nd);
1184 metric__free(m);
1185 }
1186 }
1187
1188 /**
 * find_tool_events - Search for the presence of tool events in metric_list.
1190 * @metric_list: List to take metrics from.
1191 * @tool_events: Array of false values, indices corresponding to tool events set
1192 * to true if tool event is found.
1193 */
static void find_tool_events(const struct list_head *metric_list,
			     bool tool_events[TOOL_PMU__EVENT_MAX])
{
	struct metric *m;
	int i;

	/* Mark each tool event that appears in any metric's parse context. */
	list_for_each_entry(m, metric_list, nd) {
		tool_pmu__for_each_event(i) {
			struct expr_id_data *data;

			if (tool_events[i])
				continue; /* already found in an earlier metric */
			if (!expr__get_id(m->pctx, tool_pmu__event_to_str(i), &data))
				tool_events[i] = true;
		}
	}
}
1211
1212 /**
1213 * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
1214 * metric IDs, as the IDs are held in a set,
1215 * duplicates will be removed.
1216 * @metric_list: List to take metrics from.
1217 * @combined: Out argument for result.
1218 */
build_combined_expr_ctx(const struct list_head * metric_list,struct expr_parse_ctx ** combined)1219 static int build_combined_expr_ctx(const struct list_head *metric_list,
1220 struct expr_parse_ctx **combined)
1221 {
1222 struct hashmap_entry *cur;
1223 size_t bkt;
1224 struct metric *m;
1225 char *dup;
1226 int ret;
1227
1228 *combined = expr__ctx_new();
1229 if (!*combined)
1230 return -ENOMEM;
1231
1232 list_for_each_entry(m, metric_list, nd) {
1233 if (!m->group_events && !m->modifier) {
1234 hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
1235 dup = strdup(cur->pkey);
1236 if (!dup) {
1237 ret = -ENOMEM;
1238 goto err_out;
1239 }
1240 ret = expr__add_id(*combined, dup);
1241 if (ret)
1242 goto err_out;
1243 }
1244 }
1245 }
1246 return 0;
1247 err_out:
1248 expr__ctx_free(*combined);
1249 *combined = NULL;
1250 return ret;
1251 }
1252
1253 /**
1254 * parse_ids - Build the event string for the ids and parse them creating an
1255 * evlist. The encoded metric_ids are decoded.
1256 * @metric_no_merge: is metric sharing explicitly disabled.
1257 * @fake_pmu: use a fake PMU when testing metrics not supported by the current CPU.
1258 * @ids: the event identifiers parsed from a metric.
1259 * @modifier: any modifiers added to the events.
1260 * @group_events: should events be placed in a weak group.
1261 * @tool_events: entries set true if the tool event of index could be present in
1262 * the overall list of metrics.
1263 * @out_evlist: the created list of events.
1264 */
static int parse_ids(bool metric_no_merge, bool fake_pmu,
		     struct expr_parse_ctx *ids, const char *modifier,
		     bool group_events, const bool tool_events[TOOL_PMU__EVENT_MAX],
		     struct evlist **out_evlist,
		     const char *filter_pmu)
{
	struct parse_events_error parse_error;
	struct evlist *parsed_evlist;
	struct strbuf events = STRBUF_INIT;
	int ret;

	*out_evlist = NULL;
	if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
		bool added_event = false;
		int i;
		/*
		 * We may fail to share events between metrics because a tool
		 * event isn't present in one metric. For example, a ratio of
		 * cache misses doesn't need duration_time but the same events
		 * may be used for a misses per second. Events without sharing
		 * implies multiplexing, that is best avoided, so place
		 * all tool events in every group.
		 *
		 * Also, there may be no ids/events in the expression parsing
		 * context because of constant evaluation, e.g.:
		 *	event1 if #smt_on else 0
		 * Add a tool event to avoid a parse error on an empty string.
		 */
		tool_pmu__for_each_event(i) {
			if (tool_events[i]) {
				char *tmp = strdup(tool_pmu__event_to_str(i));

				if (!tmp)
					return -ENOMEM;
				ids__insert(ids->ids, tmp);
				added_event = true;
			}
		}
		if (!added_event && hashmap__size(ids->ids) == 0) {
			char *tmp = strdup("duration_time");

			if (!tmp)
				return -ENOMEM;
			ids__insert(ids->ids, tmp);
		}
	}
	/* Turn the IDs into a parseable event string. */
	ret = metricgroup__build_event_string(&events, ids, modifier,
					      group_events);
	if (ret)
		return ret;

	/*
	 * Initialize parse_error before the first goto err_out: the error
	 * path unconditionally calls parse_events_error__exit() and
	 * previously could run it on an uninitialized struct when
	 * evlist__new() failed.
	 */
	parse_events_error__init(&parse_error);
	parsed_evlist = evlist__new();
	if (!parsed_evlist) {
		ret = -ENOMEM;
		goto err_out;
	}
	pr_debug("Parsing metric events '%s'\n", events.buf);
	ret = __parse_events(parsed_evlist, events.buf, filter_pmu,
			     &parse_error, fake_pmu, /*warn_if_reordered=*/false,
			     /*fake_tp=*/false);
	if (ret) {
		parse_events_error__print(&parse_error, events.buf);
		goto err_out;
	}
	/* Restore the metric IDs that were encoded into the event string. */
	ret = decode_all_metric_ids(parsed_evlist, modifier);
	if (ret)
		goto err_out;

	*out_evlist = parsed_evlist;
	parsed_evlist = NULL; /* ownership transferred to the caller */
err_out:
	parse_events_error__exit(&parse_error);
	evlist__delete(parsed_evlist);
	strbuf_release(&events);
	return ret;
}
1342
1343 /* How many times will a given evsel be used in a set of metrics? */
count_uses(struct list_head * metric_list,struct evsel * evsel)1344 static int count_uses(struct list_head *metric_list, struct evsel *evsel)
1345 {
1346 const char *metric_id = evsel__metric_id(evsel);
1347 struct metric *m;
1348 int uses = 0;
1349
1350 list_for_each_entry(m, metric_list, nd) {
1351 if (hashmap__find(m->pctx->ids, metric_id, NULL))
1352 uses++;
1353 }
1354 return uses;
1355 }
1356
1357 /*
1358 * Select the evsel that stat-display will use to trigger shadow/metric
1359 * printing. Pick the least shared non-tool evsel, encouraging metrics to be
1360 * with a hardware counter that is specific to them.
1361 */
pick_display_evsel(struct list_head * metric_list,struct evsel ** metric_events)1362 static struct evsel *pick_display_evsel(struct list_head *metric_list,
1363 struct evsel **metric_events)
1364 {
1365 struct evsel *selected = metric_events[0];
1366 size_t selected_uses;
1367 bool selected_is_tool;
1368
1369 if (!selected)
1370 return NULL;
1371
1372 selected_uses = count_uses(metric_list, selected);
1373 selected_is_tool = evsel__is_tool(selected);
1374 for (int i = 1; metric_events[i]; i++) {
1375 struct evsel *candidate = metric_events[i];
1376 size_t candidate_uses = count_uses(metric_list, candidate);
1377
1378 if ((selected_is_tool && !evsel__is_tool(candidate)) ||
1379 (candidate_uses < selected_uses)) {
1380 selected = candidate;
1381 selected_uses = candidate_uses;
1382 selected_is_tool = evsel__is_tool(selected);
1383 }
1384 }
1385 return selected;
1386 }
1387
/*
 * parse_groups - Expand the metric names/groups in str, parse their events and
 * attach the resulting metric expressions to perf_evlist. See
 * metricgroup__parse_groups() for the parameter meanings.
 */
static int parse_groups(struct evlist *perf_evlist,
			const char *pmu, const char *str,
			bool metric_no_group,
			bool metric_no_merge,
			bool metric_no_threshold,
			const char *user_requested_cpu_list,
			bool system_wide,
			bool fake_pmu,
			const struct pmu_metrics_table *table)
{
	struct evlist *combined_evlist = NULL;
	LIST_HEAD(metric_list);
	struct metric *m;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	bool is_default = !strcmp(str, "Default");
	int ret;

	/* Expand str's comma separated metrics/groups into metric_list. */
	ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
					   user_requested_cpu_list,
					   system_wide, &metric_list, table);
	if (ret)
		goto out;

	/* Sort metrics from largest to smallest. */
	list_sort(NULL, &metric_list, metric_list_cmp);

	if (!metric_no_merge) {
		struct expr_parse_ctx *combined = NULL;

		find_tool_events(&metric_list, tool_events);

		/*
		 * Merge the IDs of all unmodified !group_events metrics into
		 * one context so their events are parsed once and shared.
		 */
		ret = build_combined_expr_ctx(&metric_list, &combined);

		if (!ret && combined && hashmap__size(combined->ids)) {
			ret = parse_ids(metric_no_merge, fake_pmu, combined,
					/*modifier=*/NULL,
					/*group_events=*/false,
					tool_events,
					&combined_evlist,
					(pmu && strcmp(pmu, "all") == 0) ? NULL : pmu);
		}
		if (combined)
			expr__ctx_free(combined);

		if (ret)
			goto out;
	}

	if (is_default)
		list_sort(NULL, &metric_list, default_metricgroup_cmp);

	list_for_each_entry(m, &metric_list, nd) {
		struct metric_event *me;
		struct evsel **metric_events;
		struct evlist *metric_evlist = NULL;
		struct metric *n;
		struct metric_expr *expr;

		if (combined_evlist && !m->group_events) {
			/* This metric's events were parsed into the shared evlist. */
			metric_evlist = combined_evlist;
		} else if (!metric_no_merge) {
			/*
			 * See if the IDs for this metric are a subset of an
			 * earlier metric.
			 */
			list_for_each_entry(n, &metric_list, nd) {
				if (m == n)
					break;

				if (n->evlist == NULL)
					continue;

				/* Modifiers must agree for events to be shared. */
				if ((!m->modifier && n->modifier) ||
				    (m->modifier && !n->modifier) ||
				    (m->modifier && n->modifier &&
				     strcmp(m->modifier, n->modifier)))
					continue;

				/* PMUs must agree for events to be shared. */
				if ((!m->pmu && n->pmu) ||
				    (m->pmu && !n->pmu) ||
				    (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
					continue;

				if (expr__subset_of_ids(n->pctx, m->pctx)) {
					pr_debug("Events in '%s' fully contained within '%s'\n",
						 m->metric_name, n->metric_name);
					metric_evlist = n->evlist;
					break;
				}

			}
		}
		if (!metric_evlist) {
			/* No sharing possible; parse this metric's own events. */
			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
					m->group_events, tool_events, &m->evlist,
					(pmu && strcmp(pmu, "all") == 0) ? NULL : pmu);
			if (ret)
				goto out;

			metric_evlist = m->evlist;
		}
		ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
					  metric_evlist, &metric_events);
		if (ret) {
			pr_err("Cannot resolve IDs for %s: %s\n",
			       m->metric_name, m->metric_expr);
			goto out;
		}

		me = metricgroup__lookup(&perf_evlist->metric_events,
					 pick_display_evsel(&metric_list, metric_events),
					 /*create=*/true);

		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}

		/* Ownership of metric_refs moves from the metric to expr. */
		expr->metric_refs = m->metric_refs;
		m->metric_refs = NULL;
		expr->metric_expr = m->metric_expr;
		if (m->modifier) {
			char *tmp;

			/* Display name is "<metric>:<modifier>". */
			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
				expr->metric_name = NULL;
			else
				expr->metric_name = tmp;
		} else
			expr->metric_name = strdup(m->metric_name);

		if (!expr->metric_name) {
			ret = -ENOMEM;
			free(expr);
			free(metric_events);
			goto out;
		}
		if (m->default_show_events) {
			struct evsel *pos;

			/* Propagate default_show_events onto the metric's evsels. */
			for (int i = 0; metric_events[i]; i++)
				metric_events[i]->default_show_events = true;
			evlist__for_each_entry(metric_evlist, pos) {
				if (pos->metric_leader && pos->metric_leader->default_show_events)
					pos->default_show_events = true;
			}
		}
		expr->metric_threshold = m->metric_threshold;
		expr->metric_unit = m->metric_unit;
		expr->metric_events = metric_events;
		expr->runtime = m->pctx->sctx.runtime;
		expr->default_metricgroup_name = m->default_metricgroup_name;
		me->is_default = is_default;
		list_add(&expr->nd, &me->head);
	}


	/* Move all parsed events onto the caller's evlist. */
	if (combined_evlist) {
		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
		evlist__delete(combined_evlist);
	}

	list_for_each_entry(m, &metric_list, nd) {
		if (m->evlist)
			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
	}

out:
	metricgroup__free_metrics(&metric_list);
	return ret;
}
1561
int metricgroup__parse_groups(struct evlist *perf_evlist,
			      const char *pmu,
			      const char *str,
			      bool metric_no_group,
			      bool metric_no_merge,
			      bool metric_no_threshold,
			      const char *user_requested_cpu_list,
			      bool system_wide,
			      bool hardware_aware_grouping)
{
	if (hardware_aware_grouping)
		pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n");

	/* Parse against the metrics table for the running architecture. */
	return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
			    metric_no_threshold, user_requested_cpu_list, system_wide,
			    /*fake_pmu=*/false, pmu_metrics_table__find());
}
1581
metricgroup__parse_groups_test(struct evlist * evlist,const struct pmu_metrics_table * table,const char * str)1582 int metricgroup__parse_groups_test(struct evlist *evlist,
1583 const struct pmu_metrics_table *table,
1584 const char *str)
1585 {
1586 return parse_groups(evlist, "all", str,
1587 /*metric_no_group=*/false,
1588 /*metric_no_merge=*/false,
1589 /*metric_no_threshold=*/false,
1590 /*user_requested_cpu_list=*/NULL,
1591 /*system_wide=*/false,
1592 /*fake_pmu=*/true, table);
1593 }
1594
/* State for metricgroup__has_metric_or_groups_callback(). */
struct metricgroup__has_metric_data {
	/* PMU name to match metrics against. */
	const char *pmu;
	/* Metric or metric group name being searched for. */
	const char *metric_or_groups;
};
static int metricgroup__has_metric_or_groups_callback(const struct pmu_metric *pm,
						      const struct pmu_metrics_table *table
						      __maybe_unused,
						      void *vdata)
{
	struct metricgroup__has_metric_data *data = vdata;

	/* Return 1 to report a match to metricgroup__for_each_metric(). */
	if (match_pm_metric_or_groups(pm, data->pmu, data->metric_or_groups))
		return 1;
	return 0;
}
1608
metricgroup__has_metric_or_groups(const char * pmu,const char * metric_or_groups)1609 bool metricgroup__has_metric_or_groups(const char *pmu, const char *metric_or_groups)
1610 {
1611 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1612 struct metricgroup__has_metric_data data = {
1613 .pmu = pmu,
1614 .metric_or_groups = metric_or_groups,
1615 };
1616
1617 return metricgroup__for_each_metric(table,
1618 metricgroup__has_metric_or_groups_callback,
1619 &data)
1620 ? true : false;
1621 }
1622
static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
						   const struct pmu_metrics_table *table __maybe_unused,
						   void *data)
{
	unsigned int *max_level = data;
	unsigned int level;
	/* Topdown metric groups encode their level as "TopdownL<digit>". */
	const char *p = strstr(pm->metric_group ?: "", "TopdownL");

	/*
	 * Require an actual digit after "TopdownL"; previously any non-NUL
	 * character was accepted, yielding a bogus level for malformed group
	 * names. isdigit() also rejects '\0', subsuming the old check.
	 */
	if (!p || !isdigit(p[8]))
		return 0;

	level = p[8] - '0';
	if (level > *max_level)
		*max_level = level;

	return 0;
}
1640
metricgroups__topdown_max_level(void)1641 unsigned int metricgroups__topdown_max_level(void)
1642 {
1643 unsigned int max_level = 0;
1644 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1645
1646 if (!table)
1647 return false;
1648
1649 pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
1650 &max_level);
1651 return max_level;
1652 }
1653
metricgroup__copy_metric_events(struct evlist * evlist,struct cgroup * cgrp,struct rblist * new_metric_events,struct rblist * old_metric_events)1654 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1655 struct rblist *new_metric_events,
1656 struct rblist *old_metric_events)
1657 {
1658 unsigned int i;
1659
1660 for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1661 struct rb_node *nd;
1662 struct metric_event *old_me, *new_me;
1663 struct metric_expr *old_expr, *new_expr;
1664 struct evsel *evsel;
1665 size_t alloc_size;
1666 int idx, nr;
1667
1668 nd = rblist__entry(old_metric_events, i);
1669 old_me = container_of(nd, struct metric_event, nd);
1670
1671 evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1672 if (!evsel)
1673 return -EINVAL;
1674 new_me = metricgroup__lookup(new_metric_events, evsel, /*create=*/true);
1675 if (!new_me)
1676 return -ENOMEM;
1677
1678 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1679 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1680
1681 new_me->is_default = old_me->is_default;
1682 list_for_each_entry(old_expr, &old_me->head, nd) {
1683 new_expr = malloc(sizeof(*new_expr));
1684 if (!new_expr)
1685 return -ENOMEM;
1686
1687 new_expr->metric_expr = old_expr->metric_expr;
1688 new_expr->metric_threshold = old_expr->metric_threshold;
1689 new_expr->metric_name = strdup(old_expr->metric_name);
1690 if (!new_expr->metric_name)
1691 return -ENOMEM;
1692
1693 new_expr->metric_unit = old_expr->metric_unit;
1694 new_expr->runtime = old_expr->runtime;
1695 new_expr->default_metricgroup_name = old_expr->default_metricgroup_name;
1696
1697 if (old_expr->metric_refs) {
1698 /* calculate number of metric_events */
1699 for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1700 continue;
1701 alloc_size = sizeof(*new_expr->metric_refs);
1702 new_expr->metric_refs = calloc(nr + 1, alloc_size);
1703 if (!new_expr->metric_refs) {
1704 free(new_expr);
1705 return -ENOMEM;
1706 }
1707
1708 memcpy(new_expr->metric_refs, old_expr->metric_refs,
1709 nr * alloc_size);
1710 } else {
1711 new_expr->metric_refs = NULL;
1712 }
1713
1714 /* calculate number of metric_events */
1715 for (nr = 0; old_expr->metric_events[nr]; nr++)
1716 continue;
1717 alloc_size = sizeof(*new_expr->metric_events);
1718 new_expr->metric_events = calloc(nr + 1, alloc_size);
1719 if (!new_expr->metric_events) {
1720 zfree(&new_expr->metric_refs);
1721 free(new_expr);
1722 return -ENOMEM;
1723 }
1724
1725 /* copy evsel in the same position */
1726 for (idx = 0; idx < nr; idx++) {
1727 evsel = old_expr->metric_events[idx];
1728 evsel = evlist__find_evsel(evlist, evsel->core.idx);
1729 if (evsel == NULL) {
1730 zfree(&new_expr->metric_events);
1731 zfree(&new_expr->metric_refs);
1732 free(new_expr);
1733 return -EINVAL;
1734 }
1735 new_expr->metric_events[idx] = evsel;
1736 }
1737
1738 list_add(&new_expr->nd, &new_me->head);
1739 }
1740 }
1741 return 0;
1742 }
1743