1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2017, Intel Corporation.
4 */
5
6 /* Manage metrics and groups of metrics from JSON files */
7
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "pmus.h"
15 #include "print-events.h"
16 #include "smt.h"
17 #include "tool_pmu.h"
18 #include "expr.h"
19 #include "rblist.h"
20 #include <string.h>
21 #include <errno.h>
22 #include "strlist.h"
23 #include <assert.h>
24 #include <linux/ctype.h>
25 #include <linux/list_sort.h>
26 #include <linux/string.h>
27 #include <linux/zalloc.h>
28 #include <perf/cpumap.h>
29 #include <subcmd/parse-options.h>
30 #include <api/fs/fs.h>
31 #include "util.h"
32 #include <asm/bug.h>
33 #include "cgroup.h"
34 #include "util/hashmap.h"
35
/*
 * Find the metric_event node for @evsel (or its metric leader) in
 * @metric_events, optionally creating it when @create is true.
 */
struct metric_event *metricgroup__lookup(struct rblist *metric_events,
					 struct evsel *evsel,
					 bool create)
{
	struct metric_event key = {
		.evsel = evsel
	};
	struct rb_node *node;

	if (!metric_events)
		return NULL;

	/* Look nodes up by the metric leader so duplicates share one node. */
	if (evsel && evsel->metric_leader)
		key.evsel = evsel->metric_leader;

	node = rblist__find(metric_events, &key);
	if (!node && create) {
		/* The rblist copies the key, so search again for the new node. */
		rblist__add_node(metric_events, &key);
		node = rblist__find(metric_events, &key);
	}
	return node ? container_of(node, struct metric_event, nd) : NULL;
}
61
metric_event_cmp(struct rb_node * rb_node,const void * entry)62 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
63 {
64 struct metric_event *a = container_of(rb_node,
65 struct metric_event,
66 nd);
67 const struct metric_event *b = entry;
68
69 if (a->evsel == b->evsel)
70 return 0;
71 if ((char *)a->evsel < (char *)b->evsel)
72 return -1;
73 return +1;
74 }
75
/*
 * rblist node constructor: allocate a metric_event initialized from the
 * on-stack lookup key @entry, whose 'evsel' identifies the node.
 */
static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(*me));

	if (!me)
		return NULL;
	/*
	 * Copy the whole key; this already sets 'evsel', so no separate
	 * assignment is needed (the original re-assigned it redundantly).
	 */
	memcpy(me, entry, sizeof(*me));
	me->is_default = false;
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}
89
/* rblist node destructor: free every metric_expr on the node, then the node. */
static void metric_event_delete(struct rblist *rblist __maybe_unused,
				struct rb_node *rb_node)
{
	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
	struct metric_expr *expr, *tmp;

	/* Safe iteration as each entry is freed while walking the list. */
	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
		zfree(&expr->metric_name);
		zfree(&expr->metric_refs);
		zfree(&expr->metric_events);
		free(expr);
	}

	free(me);
}
105
/* Initialize @metric_events as an rblist keyed by evsel (see metric_event_cmp). */
void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
	metric_events->node_delete = metric_event_delete;
}
113
/* Free all nodes of @metric_events via metric_event_delete(). */
void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}
118
/**
 * The metric under construction. The data held here will be placed in a
 * metric_expr.
 */
struct metric {
	struct list_head nd;
	/**
	 * The expression parse context importantly holding the IDs contained
	 * within the expression.
	 */
	struct expr_parse_ctx *pctx;
	/** The PMU the metric is computed on; "cpu" when none was specified. */
	const char *pmu;
	/** The name of the metric such as "IPC". */
	const char *metric_name;
	/** Modifier on the metric such as "u" or NULL for none. */
	const char *modifier;
	/** The expression to parse, for example, "instructions/cycles". */
	const char *metric_expr;
	/** Optional threshold expression where zero value is green, otherwise red. */
	const char *metric_threshold;
	/**
	 * The "ScaleUnit" that scales and adds a unit to the metric during
	 * output.
	 */
	const char *metric_unit;
	/**
	 * Optional name of the metric group reported
	 * if the Default metric group is being processed.
	 */
	const char *default_metricgroup_name;
	/** Optional null terminated array of referenced metrics. */
	struct metric_ref *metric_refs;
	/**
	 * Should events of the metric be grouped?
	 */
	bool group_events;
	/**
	 * Parsed events for the metric. Optional as events may be taken from a
	 * different metric whose group contains all the IDs necessary for this
	 * one.
	 */
	struct evlist *evlist;
};
162
/*
 * Warn that @name's events won't be grouped. When called with @foot set,
 * print a footer telling the user how to disable the NMI watchdog, but only
 * if a header warning was printed earlier.
 */
static void metric__watchdog_constraint_hint(const char *name, bool foot)
{
	/* Set once any header warning is printed so the footer knows to appear. */
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Not grouping metric %s's events.\n", name);
		violate_nmi_constraint = true;
		return;
	}

	if (!violate_nmi_constraint)
		return;

	pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
		   "    perf stat ...\n"
		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}
181
/*
 * Should the metric's events be placed in a "{}" group? The metric's JSON
 * constraint may forbid grouping always, or only while the NMI watchdog
 * holds a counter, SMT is on, or threshold events are in use.
 */
static bool metric__group_events(const struct pmu_metric *pm, bool metric_no_threshold)
{
	switch (pm->event_grouping) {
	case MetricNoGroupEvents:
		return false;
	case MetricNoGroupEventsNmi:
		/* Grouping is only possible while the NMI watchdog is off. */
		if (!sysctl__nmi_watchdog_enabled())
			return true;
		metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
		return false;
	case MetricNoGroupEventsSmt:
		return !smt_on();
	case MetricNoGroupEventsThresholdAndNmi:
		/* With thresholds disabled, only the NMI watchdog matters. */
		if (metric_no_threshold)
			return true;
		if (!sysctl__nmi_watchdog_enabled())
			return true;
		metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
		return false;
	case MetricGroupEvents:
	default:
		return true;
	}
}
206
/* Release a metric and everything it owns. Safe to call with NULL. */
static void metric__free(struct metric *m)
{
	if (!m)
		return;

	zfree(&m->metric_refs);
	expr__ctx_free(m->pctx);
	zfree(&m->modifier);
	evlist__delete(m->evlist);
	free(m);
}
218
/**
 * metric__new - Allocate and initialize a metric from its JSON description.
 * @pm: The pmu_metric the metric is created from.
 * @modifier: if non-null event modifiers like "u" (string is copied).
 * @metric_no_group: Has the user forbidden grouping of events?
 * @metric_no_threshold: Should threshold expressions be ignored?
 * @runtime: A special argument for the parser only known at runtime.
 * @user_requested_cpu_list: Command line specified CPUs (string is copied).
 * @system_wide: Are events for all processes recorded.
 *
 * Return: the new metric, or NULL on allocation failure (any partially
 *         constructed state is released via metric__free()).
 */
static struct metric *metric__new(const struct pmu_metric *pm,
				  const char *modifier,
				  bool metric_no_group,
				  bool metric_no_threshold,
				  int runtime,
				  const char *user_requested_cpu_list,
				  bool system_wide)
{
	struct metric *m;

	m = zalloc(sizeof(*m));
	if (!m)
		return NULL;

	m->pctx = expr__ctx_new();
	if (!m->pctx)
		goto out_err;

	/* Default to the "cpu" PMU when the JSON doesn't name one. */
	m->pmu = pm->pmu ?: "cpu";
	m->metric_name = pm->metric_name;
	m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
	m->modifier = NULL;
	if (modifier) {
		m->modifier = strdup(modifier);
		if (!m->modifier)
			goto out_err;
	}
	m->metric_expr = pm->metric_expr;
	m->metric_threshold = pm->metric_threshold;
	m->metric_unit = pm->unit;
	m->pctx->sctx.user_requested_cpu_list = NULL;
	if (user_requested_cpu_list) {
		m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
		if (!m->pctx->sctx.user_requested_cpu_list)
			goto out_err;
	}
	m->pctx->sctx.runtime = runtime;
	m->pctx->sctx.system_wide = system_wide;
	/* Grouping may be vetoed by the user or by the metric's constraint. */
	m->group_events = !metric_no_group && metric__group_events(pm, metric_no_threshold);
	m->metric_refs = NULL;
	m->evlist = NULL;

	return m;
out_err:
	metric__free(m);
	return NULL;
}
266
/* Is an event with @metric_id already among the first @num_events entries? */
static bool contains_metric_id(struct evsel **metric_events, int num_events,
			       const char *metric_id)
{
	for (int idx = 0; idx < num_events; idx++) {
		if (strcmp(evsel__metric_id(metric_events[idx]), metric_id) == 0)
			return true;
	}
	return false;
}
278
/**
 * setup_metric_events - Find a group of events in metric_evlist that correspond
 *                       to the IDs from a parsed metric expression.
 * @pmu: The PMU for the IDs.
 * @ids: the metric IDs to match.
 * @metric_evlist: the list of perf events.
 * @out_metric_events: holds the created metric events array.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL when not every
 *         ID could be matched to an event.
 */
static int setup_metric_events(const char *pmu, struct hashmap *ids,
			       struct evlist *metric_evlist,
			       struct evsel ***out_metric_events)
{
	struct evsel **metric_events;
	const char *metric_id;
	struct evsel *ev;
	size_t ids_size, matched_events, i;
	bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);

	*out_metric_events = NULL;
	ids_size = hashmap__size(ids);

	/* +1 so the array stays NULL terminated. */
	metric_events = calloc(ids_size + 1, sizeof(void *));
	if (!metric_events)
		return -ENOMEM;

	matched_events = 0;
	evlist__for_each_entry(metric_evlist, ev) {
		struct expr_id_data *val_ptr;

		/* Don't match events for the wrong hybrid PMU. */
		if (!all_pmus && ev->pmu && evsel__is_hybrid(ev) &&
		    strcmp(ev->pmu->name, pmu))
			continue;
		/*
		 * Check for duplicate events with the same name. For
		 * example, uncore_imc/cas_count_read/ will turn into 6
		 * events per socket on skylakex. Only the first such
		 * event is placed in metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		if (contains_metric_id(metric_events, matched_events, metric_id))
			continue;
		/*
		 * Does this event belong to the parse context? For
		 * combined or shared groups, this metric may not care
		 * about this event.
		 */
		if (hashmap__find(ids, metric_id, &val_ptr)) {
			pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
			metric_events[matched_events++] = ev;

			if (matched_events >= ids_size)
				break;
		}
	}
	if (matched_events < ids_size) {
		/* Not all IDs resolved to an event; caller treats as error. */
		free(metric_events);
		return -EINVAL;
	}
	for (i = 0; i < ids_size; i++) {
		ev = metric_events[i];
		ev->collect_stat = true;

		/*
		 * The metric leader points to the identically named
		 * event in metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same
		 * group (or globally) as being in use as uncore events
		 * may be duplicated for each pmu. Set the metric leader
		 * of such events to be the event that appears in
		 * metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		evlist__for_each_entry_continue(metric_evlist, ev) {
			if (!strcmp(evsel__metric_id(ev), metric_id))
				ev->metric_leader = metric_events[i];
		}
	}
	*out_metric_events = metric_events;
	return 0;
}
363
/*
 * Does @sought match the semicolon separated list @metric_or_groups?
 * Matching is case-insensitive on whole list entries; "all" matches
 * everything and a NULL list only matches "No_group".
 */
static bool match_metric_or_groups(const char *metric_or_groups, const char *sought)
{
	size_t len;

	if (!sought)
		return false;
	if (!strcmp(sought, "all"))
		return true;
	if (!metric_or_groups)
		return !strcasecmp(sought, "No_group");

	len = strlen(sought);
	/* Walk each ';' separated entry instead of recursing. */
	while (metric_or_groups) {
		if (!strncasecmp(metric_or_groups, sought, len) &&
		    (metric_or_groups[len] == '\0' || metric_or_groups[len] == ';'))
			return true;
		metric_or_groups = strchr(metric_or_groups, ';');
		if (metric_or_groups)
			metric_or_groups++;
	}
	return false;
}
382
match_pm_metric_or_groups(const struct pmu_metric * pm,const char * pmu,const char * metric_or_groups)383 static bool match_pm_metric_or_groups(const struct pmu_metric *pm, const char *pmu,
384 const char *metric_or_groups)
385 {
386 const char *pm_pmu = pm->pmu ?: "cpu";
387
388 if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
389 return false;
390
391 return match_metric_or_groups(pm->metric_group, metric_or_groups) ||
392 match_metric_or_groups(pm->metric_name, metric_or_groups);
393 }
394
/* Wrapper carrying the user's callback through the sys-metric iterator. */
struct metricgroup_iter_data {
	pmu_metric_iter_fn fn;
	void *data;
};
399
/*
 * Iterator callback for system ("sys") metrics: only invoke d->fn when some
 * PMU's identifier matches the metric's compat string.
 */
static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
				       const struct pmu_metrics_table *table,
				       void *data)
{
	struct metricgroup_iter_data *d = data;
	struct perf_pmu *pmu = NULL;

	/* Metrics lacking an expression or compat string can never match. */
	if (!pm->metric_expr || !pm->compat)
		return 0;

	while ((pmu = perf_pmus__scan(pmu))) {

		if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
			continue;

		return d->fn(pm, table, d->data);
	}
	return 0;
}
419
/*
 * Call @fn for every metric in @table (if given), then for every system
 * ("sys") metric whose compat string matches a present PMU. Stops early and
 * returns the first non-zero value @fn returns.
 */
int metricgroup__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
				 void *data)
{
	struct metricgroup_iter_data sys_data = {
		.fn = fn,
		.data = data,
	};

	if (table) {
		int ret = pmu_metrics_table__for_each_metric(table, fn, data);

		if (ret)
			return ret;
	}

	return pmu_for_each_sys_metric(metricgroup__sys_event_iter, &sys_data);
}
437
/*
 * Characters in a metric ID that parse-events can't accept; each is encoded
 * as "!<index>" where index is the character's position in this string.
 */
static const char *code_characters = ",-=@";

/*
 * Append @x to @sb with every character from code_characters replaced by its
 * "!<index>" escape. Returns 0 on success or a strbuf error code.
 */
static int encode_metric_id(struct strbuf *sb, const char *x)
{
	char *c;
	int ret = 0;

	for (; *x; x++) {
		c = strchr(code_characters, *x);
		if (c) {
			/* Special character: emit '!' then its index digit. */
			ret = strbuf_addch(sb, '!');
			if (ret)
				break;

			ret = strbuf_addch(sb, '0' + (c - code_characters));
			if (ret)
				break;
		} else {
			ret = strbuf_addch(sb, *x);
			if (ret)
				break;
		}
	}
	return ret;
}
463
decode_metric_id(struct strbuf * sb,const char * x)464 static int decode_metric_id(struct strbuf *sb, const char *x)
465 {
466 const char *orig = x;
467 size_t i;
468 char c;
469 int ret;
470
471 for (; *x; x++) {
472 c = *x;
473 if (*x == '!') {
474 x++;
475 i = *x - '0';
476 if (i > strlen(code_characters)) {
477 pr_err("Bad metric-id encoding in: '%s'", orig);
478 return -1;
479 }
480 c = code_characters[i];
481 }
482 ret = strbuf_addch(sb, c);
483 if (ret)
484 return ret;
485 }
486 return 0;
487 }
488
/*
 * Rewrite every evsel's metric-id from the encoded form produced by
 * encode_metric_id() back to the original characters and, when the event's
 * name is just the parsed event string, derive a friendlier display name
 * from the metric-id (appending @modifier if given).
 */
static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
{
	struct evsel *ev;
	struct strbuf sb = STRBUF_INIT;
	char *cur;
	int ret = 0;

	evlist__for_each_entry(perf_evlist, ev) {
		if (!ev->metric_id)
			continue;

		/* Reuse one strbuf across events; reset it each iteration. */
		ret = strbuf_setlen(&sb, 0);
		if (ret)
			break;

		ret = decode_metric_id(&sb, ev->metric_id);
		if (ret)
			break;

		free((char *)ev->metric_id);
		ev->metric_id = strdup(sb.buf);
		if (!ev->metric_id) {
			ret = -ENOMEM;
			break;
		}
		/*
		 * If the name is just the parsed event, use the metric-id to
		 * give a more friendly display version.
		 */
		if (strstr(ev->name, "metric-id=")) {
			bool has_slash = false;

			zfree(&ev->name);
			/* Turn 'pmu@event@' style IDs back into 'pmu/event/'. */
			for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
				*cur = '/';
				has_slash = true;
			}

			if (modifier) {
				/* Add ':' before the modifier unless a separator exists. */
				if (!has_slash && !strchr(sb.buf, ':')) {
					ret = strbuf_addch(&sb, ':');
					if (ret)
						break;
				}
				ret = strbuf_addstr(&sb, modifier);
				if (ret)
					break;
			}
			ev->name = strdup(sb.buf);
			if (!ev->name) {
				ret = -ENOMEM;
				break;
			}
		}
	}
	strbuf_release(&sb);
	return ret;
}
547
/*
 * metricgroup__build_event_string - Build a parse-events string covering all
 * IDs in @ctx. Each event carries a "metric-id=" term (see encode_metric_id())
 * so parsed evsels can later be matched back to the metric's IDs. Tool events
 * are always emitted outside any "{}" group; @modifier, if given, is appended
 * to every non-tool event. Returns 0 on success or a strbuf error code.
 */
static int metricgroup__build_event_string(struct strbuf *events,
					   const struct expr_parse_ctx *ctx,
					   const char *modifier,
					   bool group_events)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool no_group = true, has_tool_events = false;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	int ret = 0;

#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)

	hashmap__for_each_entry(ctx->ids, cur, bkt) {
		const char *sep, *rsep, *id = cur->pkey;
		enum tool_pmu_event ev;

		pr_debug("found event %s\n", id);

		/* Always move tool events outside of the group. */
		ev = tool_pmu__str_to_event(id);
		if (ev != TOOL_PMU__EVENT_NONE) {
			has_tool_events = true;
			tool_events[ev] = true;
			continue;
		}
		/* Separate events with commas and open the group if necessary. */
		if (no_group) {
			if (group_events) {
				ret = strbuf_addch(events, '{');
				RETURN_IF_NON_ZERO(ret);
			}

			no_group = false;
		} else {
			ret = strbuf_addch(events, ',');
			RETURN_IF_NON_ZERO(ret);
		}
		/*
		 * Encode the ID as an event string. Add a qualifier for
		 * metric_id that is the original name except with characters
		 * that parse-events can't parse replaced. For example,
		 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
		 */
		sep = strchr(id, '@');
		if (sep != NULL) {
			/* 'pmu@event@' form: copy the PMU, then the terms. */
			ret = strbuf_add(events, id, sep - id);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addch(events, '/');
			RETURN_IF_NON_ZERO(ret);
			rsep = strrchr(sep, '@');
			ret = strbuf_add(events, sep + 1, rsep - sep - 1);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addstr(events, ",metric-id=");
			RETURN_IF_NON_ZERO(ret);
			sep = rsep;
		} else {
			/* Plain or 'event:modifier' form. */
			sep = strchr(id, ':');
			if (sep != NULL) {
				ret = strbuf_add(events, id, sep - id);
				RETURN_IF_NON_ZERO(ret);
			} else {
				ret = strbuf_addstr(events, id);
				RETURN_IF_NON_ZERO(ret);
			}
			ret = strbuf_addstr(events, "/metric-id=");
			RETURN_IF_NON_ZERO(ret);
		}
		ret = encode_metric_id(events, id);
		RETURN_IF_NON_ZERO(ret);
		ret = strbuf_addstr(events, "/");
		RETURN_IF_NON_ZERO(ret);

		/* Copy anything that followed the '@'/':' separator. */
		if (sep != NULL) {
			ret = strbuf_addstr(events, sep + 1);
			RETURN_IF_NON_ZERO(ret);
		}
		if (modifier) {
			ret = strbuf_addstr(events, modifier);
			RETURN_IF_NON_ZERO(ret);
		}
	}
	if (!no_group && group_events) {
		ret = strbuf_addf(events, "}:W");
		RETURN_IF_NON_ZERO(ret);
	}
	if (has_tool_events) {
		int i;

		/* Emit the deferred tool events after (outside) the group. */
		tool_pmu__for_each_event(i) {
			if (tool_events[i]) {
				if (!no_group) {
					ret = strbuf_addch(events, ',');
					RETURN_IF_NON_ZERO(ret);
				}
				no_group = false;
				ret = strbuf_addstr(events, tool_pmu__event_to_str(i));
				RETURN_IF_NON_ZERO(ret);
			}
		}
	}

	return ret;
#undef RETURN_IF_NON_ZERO
}
653
/*
 * Number of expansions for a metric whose expression contains a '?' runtime
 * parameter (see add_metric()). Weak default of 1; architectures may override.
 */
int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
{
	return 1;
}
658
/*
 * A singly linked list on the stack of the names of metrics being
 * processed. Used to identify recursion.
 */
struct visited_metric {
	/* Name of the metric being processed at this level. */
	const char *name;
	/* Next-outer metric in the resolution chain, NULL at the root. */
	const struct visited_metric *parent;
};
667
/* Arguments bundled for per-metric add-iteration callbacks. */
struct metricgroup_add_iter_data {
	/* The list metrics are accumulated on. */
	struct list_head *metric_list;
	/* The PMU name to match, or "all". */
	const char *pmu;
	/* The metric (or metric group) name being looked up. */
	const char *metric_name;
	/* Optional event modifier like "u". */
	const char *modifier;
	/* Out: result of the add operation. */
	int *ret;
	/* Out: set when at least one metric matched. */
	bool *has_match;
	/* Should events be written ungrouped (no "{}")? */
	bool metric_no_group;
	/* Should threshold expressions be ignored? */
	bool metric_no_threshold;
	/* Command line specified CPUs to record on. */
	const char *user_requested_cpu_list;
	/* Are events for all processes recorded? */
	bool system_wide;
	/* Root of the metric tree IDs are added to, or NULL for a new root. */
	struct metric *root_metric;
	/* Chain of metric names used to detect recursion. */
	const struct visited_metric *visited;
	/* Table the metrics are searched in. */
	const struct pmu_metrics_table *table;
};
683
/*
 * Forward declaration: add_metric() and resolve_metric() recurse into each
 * other while resolving referenced metrics.
 */
static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table);
694
/*
 * pmu_metrics_table__find_metric() callback: copy the matched pmu_metric out
 * of the table into the caller-provided buffer @vdata.
 */
static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
					     const struct pmu_metrics_table *table __maybe_unused,
					     void *vdata)
{
	struct pmu_metric *copied_pm = vdata;

	memcpy(copied_pm, pm, sizeof(*pm));
	return 0;
}
704
705 /**
706 * resolve_metric - Locate metrics within the root metric and recursively add
707 * references to them.
708 * @metric_list: The list the metric is added to.
709 * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
710 * @modifier: if non-null event modifiers like "u".
711 * @metric_no_group: Should events written to events be grouped "{}" or
712 * global. Grouping is the default but due to multiplexing the
713 * user may override.
714 * @user_requested_cpu_list: Command line specified CPUs to record on.
715 * @system_wide: Are events for all processes recorded.
716 * @root_metric: Metrics may reference other metrics to form a tree. In this
717 * case the root_metric holds all the IDs and a list of referenced
718 * metrics. When adding a root this argument is NULL.
719 * @visited: A singly linked list of metric names being added that is used to
720 * detect recursion.
721 * @table: The table that is searched for metrics, most commonly the table for the
722 * architecture perf is running upon.
723 */
resolve_metric(struct list_head * metric_list,struct perf_pmu * pmu,const char * modifier,bool metric_no_group,bool metric_no_threshold,const char * user_requested_cpu_list,bool system_wide,struct metric * root_metric,const struct visited_metric * visited,const struct pmu_metrics_table * table)724 static int resolve_metric(struct list_head *metric_list,
725 struct perf_pmu *pmu,
726 const char *modifier,
727 bool metric_no_group,
728 bool metric_no_threshold,
729 const char *user_requested_cpu_list,
730 bool system_wide,
731 struct metric *root_metric,
732 const struct visited_metric *visited,
733 const struct pmu_metrics_table *table)
734 {
735 struct hashmap_entry *cur;
736 size_t bkt;
737 struct to_resolve {
738 /* The metric to resolve. */
739 struct pmu_metric pm;
740 /*
741 * The key in the IDs map, this may differ from in case,
742 * etc. from pm->metric_name.
743 */
744 const char *key;
745 } *pending = NULL;
746 int i, ret = 0, pending_cnt = 0;
747
748 /*
749 * Iterate all the parsed IDs and if there's a matching metric and it to
750 * the pending array.
751 */
752 hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
753 struct pmu_metric pm;
754
755 if (pmu_metrics_table__find_metric(table, pmu, cur->pkey,
756 metricgroup__find_metric_callback,
757 &pm) != PMU_METRICS__NOT_FOUND) {
758 pending = realloc(pending,
759 (pending_cnt + 1) * sizeof(struct to_resolve));
760 if (!pending)
761 return -ENOMEM;
762
763 memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
764 pending[pending_cnt].key = cur->pkey;
765 pending_cnt++;
766 }
767 }
768
769 /* Remove the metric IDs from the context. */
770 for (i = 0; i < pending_cnt; i++)
771 expr__del_id(root_metric->pctx, pending[i].key);
772
773 /*
774 * Recursively add all the metrics, IDs are added to the root metric's
775 * context.
776 */
777 for (i = 0; i < pending_cnt; i++) {
778 ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
779 metric_no_threshold, user_requested_cpu_list, system_wide,
780 root_metric, visited, table);
781 if (ret)
782 break;
783 }
784
785 free(pending);
786 return ret;
787 }
788
789 /**
790 * __add_metric - Add a metric to metric_list.
791 * @metric_list: The list the metric is added to.
792 * @pm: The pmu_metric containing the metric to be added.
793 * @modifier: if non-null event modifiers like "u".
794 * @metric_no_group: Should events written to events be grouped "{}" or
795 * global. Grouping is the default but due to multiplexing the
796 * user may override.
797 * @metric_no_threshold: Should threshold expressions be ignored?
798 * @runtime: A special argument for the parser only known at runtime.
799 * @user_requested_cpu_list: Command line specified CPUs to record on.
800 * @system_wide: Are events for all processes recorded.
801 * @root_metric: Metrics may reference other metrics to form a tree. In this
802 * case the root_metric holds all the IDs and a list of referenced
803 * metrics. When adding a root this argument is NULL.
804 * @visited: A singly linked list of metric names being added that is used to
805 * detect recursion.
806 * @table: The table that is searched for metrics, most commonly the table for the
807 * architecture perf is running upon.
808 */
__add_metric(struct list_head * metric_list,const struct pmu_metric * pm,const char * modifier,bool metric_no_group,bool metric_no_threshold,int runtime,const char * user_requested_cpu_list,bool system_wide,struct metric * root_metric,const struct visited_metric * visited,const struct pmu_metrics_table * table)809 static int __add_metric(struct list_head *metric_list,
810 const struct pmu_metric *pm,
811 const char *modifier,
812 bool metric_no_group,
813 bool metric_no_threshold,
814 int runtime,
815 const char *user_requested_cpu_list,
816 bool system_wide,
817 struct metric *root_metric,
818 const struct visited_metric *visited,
819 const struct pmu_metrics_table *table)
820 {
821 const struct visited_metric *vm;
822 int ret;
823 bool is_root = !root_metric;
824 const char *expr;
825 struct visited_metric visited_node = {
826 .name = pm->metric_name,
827 .parent = visited,
828 };
829
830 for (vm = visited; vm; vm = vm->parent) {
831 if (!strcmp(pm->metric_name, vm->name)) {
832 pr_err("failed: recursion detected for %s\n", pm->metric_name);
833 return -1;
834 }
835 }
836
837 if (is_root) {
838 /*
839 * This metric is the root of a tree and may reference other
840 * metrics that are added recursively.
841 */
842 root_metric = metric__new(pm, modifier, metric_no_group, metric_no_threshold,
843 runtime, user_requested_cpu_list, system_wide);
844 if (!root_metric)
845 return -ENOMEM;
846
847 } else {
848 int cnt = 0;
849
850 /*
851 * This metric was referenced in a metric higher in the
852 * tree. Check if the same metric is already resolved in the
853 * metric_refs list.
854 */
855 if (root_metric->metric_refs) {
856 for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
857 if (!strcmp(pm->metric_name,
858 root_metric->metric_refs[cnt].metric_name))
859 return 0;
860 }
861 }
862
863 /* Create reference. Need space for the entry and the terminator. */
864 root_metric->metric_refs = realloc(root_metric->metric_refs,
865 (cnt + 2) * sizeof(struct metric_ref));
866 if (!root_metric->metric_refs)
867 return -ENOMEM;
868
869 /*
870 * Intentionally passing just const char pointers,
871 * from 'pe' object, so they never go away. We don't
872 * need to change them, so there's no need to create
873 * our own copy.
874 */
875 root_metric->metric_refs[cnt].metric_name = pm->metric_name;
876 root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;
877
878 /* Null terminate array. */
879 root_metric->metric_refs[cnt+1].metric_name = NULL;
880 root_metric->metric_refs[cnt+1].metric_expr = NULL;
881 }
882
883 /*
884 * For both the parent and referenced metrics, we parse
885 * all the metric's IDs and add it to the root context.
886 */
887 ret = 0;
888 expr = pm->metric_expr;
889 if (is_root && pm->metric_threshold) {
890 /*
891 * Threshold expressions are built off the actual metric. Switch
892 * to use that in case of additional necessary events. Change
893 * the visited node name to avoid this being flagged as
894 * recursion. If the threshold events are disabled, just use the
895 * metric's name as a reference. This allows metric threshold
896 * computation if there are sufficient events.
897 */
898 assert(strstr(pm->metric_threshold, pm->metric_name));
899 expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
900 visited_node.name = "__threshold__";
901 }
902 if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
903 /* Broken metric. */
904 ret = -EINVAL;
905 }
906 if (!ret) {
907 /* Resolve referenced metrics. */
908 struct perf_pmu *pmu;
909
910 if (pm->pmu && pm->pmu[0] != '\0')
911 pmu = perf_pmus__find(pm->pmu);
912 else
913 pmu = perf_pmus__scan_core(/*pmu=*/ NULL);
914
915 ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
916 metric_no_threshold, user_requested_cpu_list,
917 system_wide, root_metric, &visited_node,
918 table);
919 }
920 if (ret) {
921 if (is_root)
922 metric__free(root_metric);
923
924 } else if (is_root)
925 list_add(&root_metric->nd, metric_list);
926
927 return ret;
928 }
929
add_metric(struct list_head * metric_list,const struct pmu_metric * pm,const char * modifier,bool metric_no_group,bool metric_no_threshold,const char * user_requested_cpu_list,bool system_wide,struct metric * root_metric,const struct visited_metric * visited,const struct pmu_metrics_table * table)930 static int add_metric(struct list_head *metric_list,
931 const struct pmu_metric *pm,
932 const char *modifier,
933 bool metric_no_group,
934 bool metric_no_threshold,
935 const char *user_requested_cpu_list,
936 bool system_wide,
937 struct metric *root_metric,
938 const struct visited_metric *visited,
939 const struct pmu_metrics_table *table)
940 {
941 int ret = 0;
942
943 pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);
944
945 if (!strstr(pm->metric_expr, "?")) {
946 ret = __add_metric(metric_list, pm, modifier, metric_no_group,
947 metric_no_threshold, 0, user_requested_cpu_list,
948 system_wide, root_metric, visited, table);
949 } else {
950 int j, count;
951
952 count = arch_get_runtimeparam(pm);
953
954 /* This loop is added to create multiple
955 * events depend on count value and add
956 * those events to metric_list.
957 */
958
959 for (j = 0; j < count && !ret; j++)
960 ret = __add_metric(metric_list, pm, modifier, metric_no_group,
961 metric_no_threshold, j, user_requested_cpu_list,
962 system_wide, root_metric, visited, table);
963 }
964
965 return ret;
966 }
967
968 /**
969 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
970 * the front. tool events are excluded from the count.
971 */
metric_list_cmp(void * priv __maybe_unused,const struct list_head * l,const struct list_head * r)972 static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
973 const struct list_head *r)
974 {
975 const struct metric *left = container_of(l, struct metric, nd);
976 const struct metric *right = container_of(r, struct metric, nd);
977 struct expr_id_data *data;
978 int i, left_count, right_count;
979
980 left_count = hashmap__size(left->pctx->ids);
981 tool_pmu__for_each_event(i) {
982 if (!expr__get_id(left->pctx, tool_pmu__event_to_str(i), &data))
983 left_count--;
984 }
985
986 right_count = hashmap__size(right->pctx->ids);
987 tool_pmu__for_each_event(i) {
988 if (!expr__get_id(right->pctx, tool_pmu__event_to_str(i), &data))
989 right_count--;
990 }
991
992 return right_count - left_count;
993 }
994
995 /**
996 * default_metricgroup_cmp - Implements complex key for the Default metricgroup
997 * that first sorts by default_metricgroup_name, then
998 * metric_name.
999 */
default_metricgroup_cmp(void * priv __maybe_unused,const struct list_head * l,const struct list_head * r)1000 static int default_metricgroup_cmp(void *priv __maybe_unused,
1001 const struct list_head *l,
1002 const struct list_head *r)
1003 {
1004 const struct metric *left = container_of(l, struct metric, nd);
1005 const struct metric *right = container_of(r, struct metric, nd);
1006 int diff = strcmp(right->default_metricgroup_name, left->default_metricgroup_name);
1007
1008 if (diff)
1009 return diff;
1010
1011 return strcmp(right->metric_name, left->metric_name);
1012 }
1013
/* Arguments bundled for metricgroup__add_metric_callback(). */
struct metricgroup__add_metric_data {
	/* The list matched metrics are added to. */
	struct list_head *list;
	/* The PMU name to match, or "all". */
	const char *pmu;
	/* The metric or metric-group name being searched for. */
	const char *metric_name;
	/* Optional event modifier like "u". */
	const char *modifier;
	/* Command line specified CPUs to record on. */
	const char *user_requested_cpu_list;
	/* Has the user forbidden grouping of events? */
	bool metric_no_group;
	/* Should threshold expressions be ignored? */
	bool metric_no_threshold;
	/* Are events for all processes recorded? */
	bool system_wide;
	/* Out: set when at least one metric matched. */
	bool has_match;
};
1025
metricgroup__add_metric_callback(const struct pmu_metric * pm,const struct pmu_metrics_table * table,void * vdata)1026 static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
1027 const struct pmu_metrics_table *table,
1028 void *vdata)
1029 {
1030 struct metricgroup__add_metric_data *data = vdata;
1031 int ret = 0;
1032
1033 if (pm->metric_expr && match_pm_metric_or_groups(pm, data->pmu, data->metric_name)) {
1034 bool metric_no_group = data->metric_no_group ||
1035 match_metric_or_groups(pm->metricgroup_no_group, data->metric_name);
1036
1037 data->has_match = true;
1038 ret = add_metric(data->list, pm, data->modifier, metric_no_group,
1039 data->metric_no_threshold, data->user_requested_cpu_list,
1040 data->system_wide, /*root_metric=*/NULL,
1041 /*visited_metrics=*/NULL, table);
1042 }
1043 return ret;
1044 }
1045
/**
 * metricgroup__add_metric - Find and add a metric, or a metric group.
 * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
 * @metric_name: The name of the metric or metric group. For example, "IPC"
 *               could be the name of a metric and "TopDownL1" the name of a
 *               metric group.
 * @modifier: if non-null event modifiers like "u".
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Passed through to add_metric(); presumably disables
 *                       metric threshold handling - confirm against its
 *                       definition.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that the metric or metric group are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
				   bool metric_no_group, bool metric_no_threshold,
				   const char *user_requested_cpu_list,
				   bool system_wide,
				   struct list_head *metric_list,
				   const struct pmu_metrics_table *table)
{
	LIST_HEAD(list);
	int ret;
	struct metricgroup__add_metric_data data = {
		.list = &list,
		.pmu = pmu,
		.metric_name = metric_name,
		.modifier = modifier,
		.metric_no_group = metric_no_group,
		.metric_no_threshold = metric_no_threshold,
		.user_requested_cpu_list = user_requested_cpu_list,
		.system_wide = system_wide,
		.has_match = false,
	};

	/*
	 * Iterate over all metrics seeing if metric matches either the
	 * name or group. When it does add the metric to the list.
	 */
	ret = metricgroup__for_each_metric(table, metricgroup__add_metric_callback, &data);
	if (!ret && !data.has_match)
		ret = -EINVAL;

	/*
	 * add to metric_list so that they can be released
	 * even if it's failed
	 */
	list_splice(&list, metric_list);
	return ret;
}
1098
/**
 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
 *                                specified in a list.
 * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
 *        would match the IPC and CPI metrics, and TopDownL1 would match all
 *        the metrics in the TopDownL1 group.
 * @metric_no_group: Should events written to events be grouped "{}" or
 *                   global. Grouping is the default but due to multiplexing the
 *                   user may override.
 * @metric_no_threshold: Passed through to metricgroup__add_metric(); presumably
 *                       disables metric threshold handling - confirm there.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events for all processes recorded.
 * @metric_list: The list that metrics are added to.
 * @table: The table that is searched for metrics, most commonly the table for the
 *         architecture perf is running upon.
 */
static int metricgroup__add_metric_list(const char *pmu, const char *list,
					bool metric_no_group,
					bool metric_no_threshold,
					const char *user_requested_cpu_list,
					bool system_wide, struct list_head *metric_list,
					const struct pmu_metrics_table *table)
{
	char *list_itr, *list_copy, *metric_name, *modifier;
	int ret, count = 0;

	list_copy = strdup(list);
	if (!list_copy)
		return -ENOMEM;
	list_itr = list_copy;

	/*
	 * strsep() yields at least one (possibly empty) token for a non-NULL
	 * string, so the loop body runs at least once and ret is always
	 * assigned before it is read after the loop.
	 */
	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
		/* Split a "metric:modifier" token into its two halves. */
		modifier = strchr(metric_name, ':');
		if (modifier)
			*modifier++ = '\0';

		ret = metricgroup__add_metric(pmu, metric_name, modifier,
					      metric_no_group, metric_no_threshold,
					      user_requested_cpu_list,
					      system_wide, metric_list, table);
		if (ret == -EINVAL)
			pr_err("Cannot find metric or group `%s'\n", metric_name);

		if (ret)
			break;

		count++;
	}
	free(list_copy);

	if (!ret) {
		/*
		 * Warn about nmi_watchdog if any parsed metrics had the
		 * NO_NMI_WATCHDOG constraint.
		 */
		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
		/* No metrics. */
		if (count == 0)
			return -EINVAL;
	}
	return ret;
}
1161
metricgroup__free_metrics(struct list_head * metric_list)1162 static void metricgroup__free_metrics(struct list_head *metric_list)
1163 {
1164 struct metric *m, *tmp;
1165
1166 list_for_each_entry_safe (m, tmp, metric_list, nd) {
1167 list_del_init(&m->nd);
1168 metric__free(m);
1169 }
1170 }
1171
1172 /**
1173 * find_tool_events - Search for the pressence of tool events in metric_list.
1174 * @metric_list: List to take metrics from.
1175 * @tool_events: Array of false values, indices corresponding to tool events set
1176 * to true if tool event is found.
1177 */
find_tool_events(const struct list_head * metric_list,bool tool_events[TOOL_PMU__EVENT_MAX])1178 static void find_tool_events(const struct list_head *metric_list,
1179 bool tool_events[TOOL_PMU__EVENT_MAX])
1180 {
1181 struct metric *m;
1182
1183 list_for_each_entry(m, metric_list, nd) {
1184 int i;
1185
1186 tool_pmu__for_each_event(i) {
1187 struct expr_id_data *data;
1188
1189 if (!tool_events[i] &&
1190 !expr__get_id(m->pctx, tool_pmu__event_to_str(i), &data))
1191 tool_events[i] = true;
1192 }
1193 }
1194 }
1195
1196 /**
1197 * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
1198 * metric IDs, as the IDs are held in a set,
1199 * duplicates will be removed.
1200 * @metric_list: List to take metrics from.
1201 * @combined: Out argument for result.
1202 */
build_combined_expr_ctx(const struct list_head * metric_list,struct expr_parse_ctx ** combined)1203 static int build_combined_expr_ctx(const struct list_head *metric_list,
1204 struct expr_parse_ctx **combined)
1205 {
1206 struct hashmap_entry *cur;
1207 size_t bkt;
1208 struct metric *m;
1209 char *dup;
1210 int ret;
1211
1212 *combined = expr__ctx_new();
1213 if (!*combined)
1214 return -ENOMEM;
1215
1216 list_for_each_entry(m, metric_list, nd) {
1217 if (!m->group_events && !m->modifier) {
1218 hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
1219 dup = strdup(cur->pkey);
1220 if (!dup) {
1221 ret = -ENOMEM;
1222 goto err_out;
1223 }
1224 ret = expr__add_id(*combined, dup);
1225 if (ret)
1226 goto err_out;
1227 }
1228 }
1229 }
1230 return 0;
1231 err_out:
1232 expr__ctx_free(*combined);
1233 *combined = NULL;
1234 return ret;
1235 }
1236
1237 /**
1238 * parse_ids - Build the event string for the ids and parse them creating an
1239 * evlist. The encoded metric_ids are decoded.
1240 * @metric_no_merge: is metric sharing explicitly disabled.
1241 * @fake_pmu: use a fake PMU when testing metrics not supported by the current CPU.
1242 * @ids: the event identifiers parsed from a metric.
1243 * @modifier: any modifiers added to the events.
1244 * @group_events: should events be placed in a weak group.
1245 * @tool_events: entries set true if the tool event of index could be present in
1246 * the overall list of metrics.
1247 * @out_evlist: the created list of events.
1248 */
parse_ids(bool metric_no_merge,bool fake_pmu,struct expr_parse_ctx * ids,const char * modifier,bool group_events,const bool tool_events[TOOL_PMU__EVENT_MAX],struct evlist ** out_evlist)1249 static int parse_ids(bool metric_no_merge, bool fake_pmu,
1250 struct expr_parse_ctx *ids, const char *modifier,
1251 bool group_events, const bool tool_events[TOOL_PMU__EVENT_MAX],
1252 struct evlist **out_evlist)
1253 {
1254 struct parse_events_error parse_error;
1255 struct evlist *parsed_evlist;
1256 struct strbuf events = STRBUF_INIT;
1257 int ret;
1258
1259 *out_evlist = NULL;
1260 if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
1261 bool added_event = false;
1262 int i;
1263 /*
1264 * We may fail to share events between metrics because a tool
1265 * event isn't present in one metric. For example, a ratio of
1266 * cache misses doesn't need duration_time but the same events
1267 * may be used for a misses per second. Events without sharing
1268 * implies multiplexing, that is best avoided, so place
1269 * all tool events in every group.
1270 *
1271 * Also, there may be no ids/events in the expression parsing
1272 * context because of constant evaluation, e.g.:
1273 * event1 if #smt_on else 0
1274 * Add a tool event to avoid a parse error on an empty string.
1275 */
1276 tool_pmu__for_each_event(i) {
1277 if (tool_events[i]) {
1278 char *tmp = strdup(tool_pmu__event_to_str(i));
1279
1280 if (!tmp)
1281 return -ENOMEM;
1282 ids__insert(ids->ids, tmp);
1283 added_event = true;
1284 }
1285 }
1286 if (!added_event && hashmap__size(ids->ids) == 0) {
1287 char *tmp = strdup("duration_time");
1288
1289 if (!tmp)
1290 return -ENOMEM;
1291 ids__insert(ids->ids, tmp);
1292 }
1293 }
1294 ret = metricgroup__build_event_string(&events, ids, modifier,
1295 group_events);
1296 if (ret)
1297 return ret;
1298
1299 parsed_evlist = evlist__new();
1300 if (!parsed_evlist) {
1301 ret = -ENOMEM;
1302 goto err_out;
1303 }
1304 pr_debug("Parsing metric events '%s'\n", events.buf);
1305 parse_events_error__init(&parse_error);
1306 ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
1307 &parse_error, fake_pmu, /*warn_if_reordered=*/false,
1308 /*fake_tp=*/false);
1309 if (ret) {
1310 parse_events_error__print(&parse_error, events.buf);
1311 goto err_out;
1312 }
1313 ret = decode_all_metric_ids(parsed_evlist, modifier);
1314 if (ret)
1315 goto err_out;
1316
1317 *out_evlist = parsed_evlist;
1318 parsed_evlist = NULL;
1319 err_out:
1320 parse_events_error__exit(&parse_error);
1321 evlist__delete(parsed_evlist);
1322 strbuf_release(&events);
1323 return ret;
1324 }
1325
/*
 * parse_groups - Parse the metrics or metric groups named in @str, create
 * their events on @perf_evlist and record a struct metric_expr for each
 * metric in @perf_evlist->metric_events. Shares events between metrics where
 * possible unless @metric_no_merge.
 */
static int parse_groups(struct evlist *perf_evlist,
			const char *pmu, const char *str,
			bool metric_no_group,
			bool metric_no_merge,
			bool metric_no_threshold,
			const char *user_requested_cpu_list,
			bool system_wide,
			bool fake_pmu,
			const struct pmu_metrics_table *table)
{
	struct evlist *combined_evlist = NULL;
	LIST_HEAD(metric_list);
	struct metric *m;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	bool is_default = !strcmp(str, "Default");
	int ret;

	ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
					   user_requested_cpu_list,
					   system_wide, &metric_list, table);
	if (ret)
		goto out;

	/* Sort metrics from largest to smallest. */
	list_sort(NULL, &metric_list, metric_list_cmp);

	if (!metric_no_merge) {
		struct expr_parse_ctx *combined = NULL;

		find_tool_events(&metric_list, tool_events);

		/* Merge IDs of all ungrouped metrics into one shared evlist. */
		ret = build_combined_expr_ctx(&metric_list, &combined);

		if (!ret && combined && hashmap__size(combined->ids)) {
			ret = parse_ids(metric_no_merge, fake_pmu, combined,
					/*modifier=*/NULL,
					/*group_events=*/false,
					tool_events,
					&combined_evlist);
		}
		if (combined)
			expr__ctx_free(combined);

		if (ret)
			goto out;
	}

	if (is_default)
		list_sort(NULL, &metric_list, default_metricgroup_cmp);

	list_for_each_entry(m, &metric_list, nd) {
		struct metric_event *me;
		struct evsel **metric_events;
		struct evlist *metric_evlist = NULL;
		struct metric *n;
		struct metric_expr *expr;

		if (combined_evlist && !m->group_events) {
			metric_evlist = combined_evlist;
		} else if (!metric_no_merge) {
			/*
			 * See if the IDs for this metric are a subset of an
			 * earlier metric.
			 */
			list_for_each_entry(n, &metric_list, nd) {
				if (m == n)
					break;

				if (n->evlist == NULL)
					continue;

				/* Modifiers must match exactly to share events. */
				if ((!m->modifier && n->modifier) ||
				    (m->modifier && !n->modifier) ||
				    (m->modifier && n->modifier &&
				     strcmp(m->modifier, n->modifier)))
					continue;

				/* PMUs must match exactly to share events. */
				if ((!m->pmu && n->pmu) ||
				    (m->pmu && !n->pmu) ||
				    (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
					continue;

				if (expr__subset_of_ids(n->pctx, m->pctx)) {
					pr_debug("Events in '%s' fully contained within '%s'\n",
						 m->metric_name, n->metric_name);
					metric_evlist = n->evlist;
					break;
				}

			}
		}
		if (!metric_evlist) {
			/* No sharing possible, parse events just for this metric. */
			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
					m->group_events, tool_events, &m->evlist);
			if (ret)
				goto out;

			metric_evlist = m->evlist;
		}
		ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
					  metric_evlist, &metric_events);
		if (ret) {
			pr_err("Cannot resolve IDs for %s: %s\n",
			       m->metric_name, m->metric_expr);
			goto out;
		}

		/*
		 * NOTE(review): me may be NULL if the rblist node allocation
		 * inside metricgroup__lookup() fails; the dereferences below
		 * assume it succeeded.
		 */
		me = metricgroup__lookup(&perf_evlist->metric_events, metric_events[0],
					 /*create=*/true);

		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}

		/* Transfer ownership of the metric references to expr. */
		expr->metric_refs = m->metric_refs;
		m->metric_refs = NULL;
		expr->metric_expr = m->metric_expr;
		if (m->modifier) {
			char *tmp;

			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
				expr->metric_name = NULL;
			else
				expr->metric_name = tmp;
		} else
			expr->metric_name = strdup(m->metric_name);

		if (!expr->metric_name) {
			ret = -ENOMEM;
			/*
			 * Fix: expr was previously leaked here along with the
			 * metric_refs just moved into it. Return the refs to
			 * @m so metric__free() releases them at out, and free
			 * expr itself.
			 */
			m->metric_refs = expr->metric_refs;
			free(expr);
			free(metric_events);
			goto out;
		}
		expr->metric_threshold = m->metric_threshold;
		expr->metric_unit = m->metric_unit;
		expr->metric_events = metric_events;
		expr->runtime = m->pctx->sctx.runtime;
		expr->default_metricgroup_name = m->default_metricgroup_name;
		me->is_default = is_default;
		list_add(&expr->nd, &me->head);
	}


	/* Move all successfully parsed events onto the caller's evlist. */
	if (combined_evlist) {
		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
		evlist__delete(combined_evlist);
	}

	list_for_each_entry(m, &metric_list, nd) {
		if (m->evlist)
			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
	}

out:
	metricgroup__free_metrics(&metric_list);
	return ret;
}
1485
/*
 * metricgroup__parse_groups - Public entry point: look up the architecture's
 * metrics table and delegate to parse_groups() with a real (non-fake) PMU.
 * Returns -EINVAL when no metrics table is available.
 */
int metricgroup__parse_groups(struct evlist *perf_evlist,
			      const char *pmu,
			      const char *str,
			      bool metric_no_group,
			      bool metric_no_merge,
			      bool metric_no_threshold,
			      const char *user_requested_cpu_list,
			      bool system_wide,
			      bool hardware_aware_grouping)
{
	const struct pmu_metrics_table *table = pmu_metrics_table__find();

	if (!table)
		return -EINVAL;
	/* hardware_aware_grouping is only reported, not otherwise used here. */
	if (hardware_aware_grouping)
		pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n");

	return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
			    metric_no_threshold, user_requested_cpu_list, system_wide,
			    /*fake_pmu=*/false, table);
}
1507
/*
 * metricgroup__parse_groups_test - Test-only variant of the above: parse @str
 * against an explicitly supplied @table for all PMUs, using a fake PMU so
 * metrics not supported by the current CPU can still be parsed.
 */
int metricgroup__parse_groups_test(struct evlist *evlist,
				   const struct pmu_metrics_table *table,
				   const char *str)
{
	return parse_groups(evlist, "all", str,
			    /*metric_no_group=*/false,
			    /*metric_no_merge=*/false,
			    /*metric_no_threshold=*/false,
			    /*user_requested_cpu_list=*/NULL,
			    /*system_wide=*/false,
			    /*fake_pmu=*/true, table);
}
1520
/* Arguments for metricgroup__has_metric_or_groups_callback(). */
struct metricgroup__has_metric_data {
	/* PMU name to match, or "all". */
	const char *pmu;
	/* Metric or metric-group name being searched for. */
	const char *metric_or_groups;
};
/* Table-walk callback: returns 1 when @pm matches the requested PMU and name. */
static int metricgroup__has_metric_or_groups_callback(const struct pmu_metric *pm,
						      const struct pmu_metrics_table *table
						      __maybe_unused,
						      void *vdata)
{
	struct metricgroup__has_metric_data *data = vdata;

	return match_pm_metric_or_groups(pm, data->pmu, data->metric_or_groups) ? 1 : 0;
}
1534
metricgroup__has_metric_or_groups(const char * pmu,const char * metric_or_groups)1535 bool metricgroup__has_metric_or_groups(const char *pmu, const char *metric_or_groups)
1536 {
1537 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1538 struct metricgroup__has_metric_data data = {
1539 .pmu = pmu,
1540 .metric_or_groups = metric_or_groups,
1541 };
1542
1543 if (!table)
1544 return false;
1545
1546 return pmu_metrics_table__for_each_metric(table,
1547 metricgroup__has_metric_or_groups_callback,
1548 &data)
1549 ? true : false;
1550 }
1551
/*
 * Table-walk callback: scan @pm's group list for "TopdownL<N>" and raise the
 * running maximum (*data) if N exceeds it.
 */
static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
						   const struct pmu_metrics_table *table __maybe_unused,
						   void *data)
{
	unsigned int *max_level = data;
	unsigned int level;
	const char *p = strstr(pm->metric_group ?: "", "TopdownL");

	/*
	 * Only accept a decimal digit after "TopdownL"; previously any
	 * non-NUL character was converted, so an unexpected suffix could
	 * yield a bogus (huge) level.
	 */
	if (!p || !isdigit(p[8]))
		return 0;

	level = p[8] - '0';
	if (level > *max_level)
		*max_level = level;

	return 0;
}
1569
metricgroups__topdown_max_level(void)1570 unsigned int metricgroups__topdown_max_level(void)
1571 {
1572 unsigned int max_level = 0;
1573 const struct pmu_metrics_table *table = pmu_metrics_table__find();
1574
1575 if (!table)
1576 return false;
1577
1578 pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
1579 &max_level);
1580 return max_level;
1581 }
1582
metricgroup__copy_metric_events(struct evlist * evlist,struct cgroup * cgrp,struct rblist * new_metric_events,struct rblist * old_metric_events)1583 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1584 struct rblist *new_metric_events,
1585 struct rblist *old_metric_events)
1586 {
1587 unsigned int i;
1588
1589 for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1590 struct rb_node *nd;
1591 struct metric_event *old_me, *new_me;
1592 struct metric_expr *old_expr, *new_expr;
1593 struct evsel *evsel;
1594 size_t alloc_size;
1595 int idx, nr;
1596
1597 nd = rblist__entry(old_metric_events, i);
1598 old_me = container_of(nd, struct metric_event, nd);
1599
1600 evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1601 if (!evsel)
1602 return -EINVAL;
1603 new_me = metricgroup__lookup(new_metric_events, evsel, /*create=*/true);
1604 if (!new_me)
1605 return -ENOMEM;
1606
1607 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1608 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1609
1610 list_for_each_entry(old_expr, &old_me->head, nd) {
1611 new_expr = malloc(sizeof(*new_expr));
1612 if (!new_expr)
1613 return -ENOMEM;
1614
1615 new_expr->metric_expr = old_expr->metric_expr;
1616 new_expr->metric_threshold = old_expr->metric_threshold;
1617 new_expr->metric_name = strdup(old_expr->metric_name);
1618 if (!new_expr->metric_name)
1619 return -ENOMEM;
1620
1621 new_expr->metric_unit = old_expr->metric_unit;
1622 new_expr->runtime = old_expr->runtime;
1623
1624 if (old_expr->metric_refs) {
1625 /* calculate number of metric_events */
1626 for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1627 continue;
1628 alloc_size = sizeof(*new_expr->metric_refs);
1629 new_expr->metric_refs = calloc(nr + 1, alloc_size);
1630 if (!new_expr->metric_refs) {
1631 free(new_expr);
1632 return -ENOMEM;
1633 }
1634
1635 memcpy(new_expr->metric_refs, old_expr->metric_refs,
1636 nr * alloc_size);
1637 } else {
1638 new_expr->metric_refs = NULL;
1639 }
1640
1641 /* calculate number of metric_events */
1642 for (nr = 0; old_expr->metric_events[nr]; nr++)
1643 continue;
1644 alloc_size = sizeof(*new_expr->metric_events);
1645 new_expr->metric_events = calloc(nr + 1, alloc_size);
1646 if (!new_expr->metric_events) {
1647 zfree(&new_expr->metric_refs);
1648 free(new_expr);
1649 return -ENOMEM;
1650 }
1651
1652 /* copy evsel in the same position */
1653 for (idx = 0; idx < nr; idx++) {
1654 evsel = old_expr->metric_events[idx];
1655 evsel = evlist__find_evsel(evlist, evsel->core.idx);
1656 if (evsel == NULL) {
1657 zfree(&new_expr->metric_events);
1658 zfree(&new_expr->metric_refs);
1659 free(new_expr);
1660 return -EINVAL;
1661 }
1662 new_expr->metric_events[idx] = evsel;
1663 }
1664
1665 list_add(&new_expr->nd, &new_me->head);
1666 }
1667 }
1668 return 0;
1669 }
1670