xref: /linux/tools/perf/util/metricgroup.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2017, Intel Corporation.
4  */
5 
6 /* Manage metrics and groups of metrics from JSON files */
7 
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "pmus.h"
15 #include "print-events.h"
16 #include "smt.h"
17 #include "tool_pmu.h"
18 #include "expr.h"
19 #include "rblist.h"
20 #include <string.h>
21 #include <errno.h>
22 #include "strlist.h"
23 #include <assert.h>
24 #include <linux/ctype.h>
25 #include <linux/list_sort.h>
26 #include <linux/string.h>
27 #include <linux/zalloc.h>
28 #include <perf/cpumap.h>
29 #include <subcmd/parse-options.h>
30 #include <api/fs/fs.h>
31 #include "util.h"
32 #include <asm/bug.h>
33 #include "cgroup.h"
34 #include "util/hashmap.h"
35 
36 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
37 					 struct evsel *evsel,
38 					 bool create)
39 {
40 	struct rb_node *nd;
41 	struct metric_event me = {
42 		.evsel = evsel
43 	};
44 
45 	if (!metric_events)
46 		return NULL;
47 
48 	if (evsel && evsel->metric_leader)
49 		me.evsel = evsel->metric_leader;
50 	nd = rblist__find(metric_events, &me);
51 	if (nd)
52 		return container_of(nd, struct metric_event, nd);
53 	if (create) {
54 		rblist__add_node(metric_events, &me);
55 		nd = rblist__find(metric_events, &me);
56 		if (nd)
57 			return container_of(nd, struct metric_event, nd);
58 	}
59 	return NULL;
60 }
61 
62 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
63 {
64 	struct metric_event *a = container_of(rb_node,
65 					      struct metric_event,
66 					      nd);
67 	const struct metric_event *b = entry;
68 
69 	if (a->evsel == b->evsel)
70 		return 0;
71 	if ((char *)a->evsel < (char *)b->evsel)
72 		return -1;
73 	return +1;
74 }
75 
76 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
77 					const void *entry)
78 {
79 	struct metric_event *me = malloc(sizeof(struct metric_event));
80 
81 	if (!me)
82 		return NULL;
83 	memcpy(me, entry, sizeof(struct metric_event));
84 	me->evsel = ((struct metric_event *)entry)->evsel;
85 	me->is_default = false;
86 	INIT_LIST_HEAD(&me->head);
87 	return &me->nd;
88 }
89 
90 static void metric_event_delete(struct rblist *rblist __maybe_unused,
91 				struct rb_node *rb_node)
92 {
93 	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
94 	struct metric_expr *expr, *tmp;
95 
96 	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
97 		zfree(&expr->metric_name);
98 		zfree(&expr->metric_refs);
99 		zfree(&expr->metric_events);
100 		free(expr);
101 	}
102 
103 	free(me);
104 }
105 
/*
 * Initialize @metric_events with the metric_event node callbacks so entries
 * are created, compared and destroyed via the helpers above.
 */
static void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
	metric_events->node_delete = metric_event_delete;
}
113 
/* Release every node (and its metric_exprs) held in @metric_events. */
void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}
118 
/**
 * The metric under construction. The data held here will be placed in a
 * metric_expr.
 */
struct metric {
	struct list_head nd;
	/**
	 * The expression parse context importantly holding the IDs contained
	 * within the expression.
	 */
	struct expr_parse_ctx *pctx;
	/** The PMU the metric applies to; defaults to "cpu" when unspecified. */
	const char *pmu;
	/** The name of the metric such as "IPC". */
	const char *metric_name;
	/** Modifier on the metric such as "u" or NULL for none. */
	const char *modifier;
	/** The expression to parse, for example, "instructions/cycles". */
	const char *metric_expr;
	/** Optional threshold expression where zero value is green, otherwise red. */
	const char *metric_threshold;
	/**
	 * The "ScaleUnit" that scales and adds a unit to the metric during
	 * output.
	 */
	const char *metric_unit;
	/**
	 * Optional name of the metric group reported
	 * if the Default metric group is being processed.
	 */
	const char *default_metricgroup_name;
	/** Optional null terminated array of referenced metrics. */
	struct metric_ref *metric_refs;
	/**
	 * Should events of the metric be grouped? When true the generated
	 * event string wraps the events in "{}".
	 */
	bool group_events;
	/**
	 * Parsed events for the metric. Optional as events may be taken from a
	 * different metric whose group contains all the IDs necessary for this
	 * one.
	 */
	struct evlist *evlist;
};
162 
163 static void metric__watchdog_constraint_hint(const char *name, bool foot)
164 {
165 	static bool violate_nmi_constraint;
166 
167 	if (!foot) {
168 		pr_warning("Not grouping metric %s's events.\n", name);
169 		violate_nmi_constraint = true;
170 		return;
171 	}
172 
173 	if (!violate_nmi_constraint)
174 		return;
175 
176 	pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
177 		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
178 		   "    perf stat ...\n"
179 		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
180 }
181 
182 static bool metric__group_events(const struct pmu_metric *pm)
183 {
184 	switch (pm->event_grouping) {
185 	case MetricNoGroupEvents:
186 		return false;
187 	case MetricNoGroupEventsNmi:
188 		if (!sysctl__nmi_watchdog_enabled())
189 			return true;
190 		metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
191 		return false;
192 	case MetricNoGroupEventsSmt:
193 		return !smt_on();
194 	case MetricGroupEvents:
195 	default:
196 		return true;
197 	}
198 }
199 
200 static void metric__free(struct metric *m)
201 {
202 	if (!m)
203 		return;
204 
205 	zfree(&m->metric_refs);
206 	expr__ctx_free(m->pctx);
207 	zfree(&m->modifier);
208 	evlist__delete(m->evlist);
209 	free(m);
210 }
211 
/**
 * metric__new - Allocate and initialize a struct metric from a pmu_metric.
 * @pm: The jevents metric this is built from; its strings are referenced, not
 *      duplicated, so they must outlive the metric.
 * @modifier: Event modifier such as "u", duplicated if non-NULL.
 * @metric_no_group: If true the metric's events are never grouped.
 * @runtime: Value substituted for "?" in the expression at parse time.
 * @user_requested_cpu_list: Command line specified CPUs, duplicated if non-NULL.
 * @system_wide: Are events for all processes recorded.
 *
 * Returns the new metric or NULL on allocation failure; release with
 * metric__free().
 */
static struct metric *metric__new(const struct pmu_metric *pm,
				  const char *modifier,
				  bool metric_no_group,
				  int runtime,
				  const char *user_requested_cpu_list,
				  bool system_wide)
{
	struct metric *m;

	m = zalloc(sizeof(*m));
	if (!m)
		return NULL;

	m->pctx = expr__ctx_new();
	if (!m->pctx)
		goto out_err;

	/* Strings taken from @pm point into the metrics table and aren't owned. */
	m->pmu = pm->pmu ?: "cpu";
	m->metric_name = pm->metric_name;
	m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
	m->modifier = NULL;
	if (modifier) {
		m->modifier = strdup(modifier);
		if (!m->modifier)
			goto out_err;
	}
	m->metric_expr = pm->metric_expr;
	m->metric_threshold = pm->metric_threshold;
	m->metric_unit = pm->unit;
	m->pctx->sctx.user_requested_cpu_list = NULL;
	if (user_requested_cpu_list) {
		m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
		if (!m->pctx->sctx.user_requested_cpu_list)
			goto out_err;
	}
	m->pctx->sctx.runtime = runtime;
	m->pctx->sctx.system_wide = system_wide;
	/* Grouping requires both the user allowing it and the metric permitting it. */
	m->group_events = !metric_no_group && metric__group_events(pm);
	m->metric_refs = NULL;
	m->evlist = NULL;

	return m;
out_err:
	/* metric__free() handles the partially constructed metric. */
	metric__free(m);
	return NULL;
}
258 
/* Is an evsel with metric-id @metric_id already among the first @num_events? */
static bool contains_metric_id(struct evsel **metric_events, int num_events,
			       const char *metric_id)
{
	/* Linear scan is fine: metric event arrays are small. */
	for (int i = 0; i < num_events; i++) {
		if (strcmp(evsel__metric_id(metric_events[i]), metric_id) == 0)
			return true;
	}
	return false;
}
270 
/**
 * setup_metric_events - Find a group of events in metric_evlist that correspond
 *                       to the IDs from a parsed metric expression.
 * @pmu: The PMU for the IDs.
 * @ids: the metric IDs to match.
 * @metric_evlist: the list of perf events.
 * @out_metric_events: holds the created metric events array.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL when not all
 * IDs could be matched to events.
 */
static int setup_metric_events(const char *pmu, struct hashmap *ids,
			       struct evlist *metric_evlist,
			       struct evsel ***out_metric_events)
{
	struct evsel **metric_events;
	const char *metric_id;
	struct evsel *ev;
	size_t ids_size, matched_events, i;
	bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);

	*out_metric_events = NULL;
	ids_size = hashmap__size(ids);

	/* calloc's extra zeroed slot leaves the array NULL-terminated. */
	metric_events = calloc(ids_size + 1, sizeof(void *));
	if (!metric_events)
		return -ENOMEM;

	matched_events = 0;
	evlist__for_each_entry(metric_evlist, ev) {
		struct expr_id_data *val_ptr;

		/* Don't match events for the wrong hybrid PMU. */
		if (!all_pmus && ev->pmu && evsel__is_hybrid(ev) &&
		    strcmp(ev->pmu->name, pmu))
			continue;
		/*
		 * Check for duplicate events with the same name. For
		 * example, uncore_imc/cas_count_read/ will turn into 6
		 * events per socket on skylakex. Only the first such
		 * event is placed in metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		if (contains_metric_id(metric_events, matched_events, metric_id))
			continue;
		/*
		 * Does this event belong to the parse context? For
		 * combined or shared groups, this metric may not care
		 * about this event.
		 */
		if (hashmap__find(ids, metric_id, &val_ptr)) {
			pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
			metric_events[matched_events++] = ev;

			if (matched_events >= ids_size)
				break;
		}
	}
	if (matched_events < ids_size) {
		/* Not every metric ID could be matched to an event. */
		free(metric_events);
		return -EINVAL;
	}
	for (i = 0; i < ids_size; i++) {
		ev = metric_events[i];
		ev->collect_stat = true;

		/*
		 * The metric leader points to the identically named
		 * event in metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same
		 * group (or globally) as being in use as uncore events
		 * may be duplicated for each pmu. Set the metric leader
		 * of such events to be the event that appears in
		 * metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		evlist__for_each_entry_continue(metric_evlist, ev) {
			if (!strcmp(evsel__metric_id(ev), metric_id))
				ev->metric_leader = metric_events[i];
		}
	}
	*out_metric_events = metric_events;
	return 0;
}
355 
/*
 * Does the ';'-separated list @metric_or_groups contain @sought
 * (case-insensitively)? "all" matches anything; a NULL list matches only the
 * special name "No_group".
 */
static bool match_metric(const char *metric_or_groups, const char *sought)
{
	size_t len;

	if (!sought)
		return false;
	if (!strcmp(sought, "all"))
		return true;
	if (!metric_or_groups)
		return !strcasecmp(sought, "No_group");

	len = strlen(sought);
	/* Walk the list iteratively rather than recursing per ';'. */
	while (metric_or_groups) {
		if (!strncasecmp(metric_or_groups, sought, len) &&
		    (metric_or_groups[len] == '\0' || metric_or_groups[len] == ';'))
			return true;
		metric_or_groups = strchr(metric_or_groups, ';');
		if (metric_or_groups)
			metric_or_groups++;
	}
	return false;
}
374 
375 static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric)
376 {
377 	const char *pm_pmu = pm->pmu ?: "cpu";
378 
379 	if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
380 		return false;
381 
382 	return match_metric(pm->metric_group, metric) ||
383 	       match_metric(pm->metric_name, metric);
384 }
385 
/** struct mep - RB-tree node for building printing information. */
struct mep {
	/** nd - RB-tree element. */
	struct rb_node nd;
	/** @metric_group: Owned metric group name, separated others with ';'. */
	char *metric_group;
	/** @metric_name: Metric name, referencing the metrics table (not owned). */
	const char *metric_name;
	/** @metric_desc: Short description from the metrics table (not owned). */
	const char *metric_desc;
	/** @metric_long_desc: Long description from the metrics table (not owned). */
	const char *metric_long_desc;
	/** @metric_expr: The metric's expression (not owned). */
	const char *metric_expr;
	/** @metric_threshold: Optional threshold expression (not owned). */
	const char *metric_threshold;
	/** @metric_unit: Optional "ScaleUnit" string (not owned). */
	const char *metric_unit;
};
399 
400 static int mep_cmp(struct rb_node *rb_node, const void *entry)
401 {
402 	struct mep *a = container_of(rb_node, struct mep, nd);
403 	struct mep *b = (struct mep *)entry;
404 	int ret;
405 
406 	ret = strcmp(a->metric_group, b->metric_group);
407 	if (ret)
408 		return ret;
409 
410 	return strcmp(a->metric_name, b->metric_name);
411 }
412 
413 static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
414 {
415 	struct mep *me = malloc(sizeof(struct mep));
416 
417 	if (!me)
418 		return NULL;
419 
420 	memcpy(me, entry, sizeof(struct mep));
421 	return &me->nd;
422 }
423 
424 static void mep_delete(struct rblist *rl __maybe_unused,
425 		       struct rb_node *nd)
426 {
427 	struct mep *me = container_of(nd, struct mep, nd);
428 
429 	zfree(&me->metric_group);
430 	free(me);
431 }
432 
433 static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
434 			      const char *metric_name)
435 {
436 	struct rb_node *nd;
437 	struct mep me = {
438 		.metric_group = strdup(metric_group),
439 		.metric_name = metric_name,
440 	};
441 	nd = rblist__find(groups, &me);
442 	if (nd) {
443 		free(me.metric_group);
444 		return container_of(nd, struct mep, nd);
445 	}
446 	rblist__add_node(groups, &me);
447 	nd = rblist__find(groups, &me);
448 	if (nd)
449 		return container_of(nd, struct mep, nd);
450 	return NULL;
451 }
452 
453 static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
454 					struct rblist *groups)
455 {
456 	const char *g;
457 	char *omg, *mg;
458 
459 	mg = strdup(pm->metric_group ?: pm->metric_name);
460 	if (!mg)
461 		return -ENOMEM;
462 	omg = mg;
463 	while ((g = strsep(&mg, ";")) != NULL) {
464 		struct mep *me;
465 
466 		g = skip_spaces(g);
467 		if (strlen(g))
468 			me = mep_lookup(groups, g, pm->metric_name);
469 		else
470 			me = mep_lookup(groups, pm->metric_name, pm->metric_name);
471 
472 		if (me) {
473 			me->metric_desc = pm->desc;
474 			me->metric_long_desc = pm->long_desc;
475 			me->metric_expr = pm->metric_expr;
476 			me->metric_threshold = pm->metric_threshold;
477 			me->metric_unit = pm->unit;
478 		}
479 	}
480 	free(omg);
481 
482 	return 0;
483 }
484 
/* Wraps a per-metric callback and its argument for sys metric iteration. */
struct metricgroup_iter_data {
	pmu_metric_iter_fn fn;
	void *data;
};
489 
490 static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
491 				       const struct pmu_metrics_table *table,
492 				       void *data)
493 {
494 	struct metricgroup_iter_data *d = data;
495 	struct perf_pmu *pmu = NULL;
496 
497 	if (!pm->metric_expr || !pm->compat)
498 		return 0;
499 
500 	while ((pmu = perf_pmus__scan(pmu))) {
501 
502 		if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
503 			continue;
504 
505 		return d->fn(pm, table, d->data);
506 	}
507 	return 0;
508 }
509 
/* Adapter so metricgroup__add_to_mep_groups() can be used as a table callback. */
static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
					const struct pmu_metrics_table *table __maybe_unused,
					void *vdata)
{
	struct rblist *groups = vdata;

	return metricgroup__add_to_mep_groups(pm, groups);
}
518 
/**
 * metricgroup__print - Emit every known metric through the print callbacks.
 * @print_cb: callbacks providing print_metric().
 * @print_state: opaque state passed through to the callback.
 *
 * Collects (group, metric) pairs from the architecture metrics table and from
 * matching sys metrics into a sorted rblist, then prints and drains it.
 */
void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
{
	struct rblist groups;
	const struct pmu_metrics_table *table;
	struct rb_node *node, *next;

	rblist__init(&groups);
	groups.node_new = mep_new;
	groups.node_cmp = mep_cmp;
	groups.node_delete = mep_delete;
	table = pmu_metrics_table__find();
	if (table) {
		pmu_metrics_table__for_each_metric(table,
						 metricgroup__add_to_mep_groups_callback,
						 &groups);
	}
	{
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_to_mep_groups_callback,
			.data = &groups,
		};
		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
	}

	/* Walk in sorted order, removing each node after it is printed. */
	for (node = rb_first_cached(&groups.entries); node; node = next) {
		struct mep *me = container_of(node, struct mep, nd);

		print_cb->print_metric(print_state,
				me->metric_group,
				me->metric_name,
				me->metric_desc,
				me->metric_long_desc,
				me->metric_expr,
				me->metric_threshold,
				me->metric_unit);
		next = rb_next(node);
		rblist__remove_node(&groups, node);
	}
}
558 
/* Characters replaced by "!<index>" in metric IDs; see encode/decode_metric_id. */
static const char *code_characters = ",-=@";
560 
561 static int encode_metric_id(struct strbuf *sb, const char *x)
562 {
563 	char *c;
564 	int ret = 0;
565 
566 	for (; *x; x++) {
567 		c = strchr(code_characters, *x);
568 		if (c) {
569 			ret = strbuf_addch(sb, '!');
570 			if (ret)
571 				break;
572 
573 			ret = strbuf_addch(sb, '0' + (c - code_characters));
574 			if (ret)
575 				break;
576 		} else {
577 			ret = strbuf_addch(sb, *x);
578 			if (ret)
579 				break;
580 		}
581 	}
582 	return ret;
583 }
584 
585 static int decode_metric_id(struct strbuf *sb, const char *x)
586 {
587 	const char *orig = x;
588 	size_t i;
589 	char c;
590 	int ret;
591 
592 	for (; *x; x++) {
593 		c = *x;
594 		if (*x == '!') {
595 			x++;
596 			i = *x - '0';
597 			if (i > strlen(code_characters)) {
598 				pr_err("Bad metric-id encoding in: '%s'", orig);
599 				return -1;
600 			}
601 			c = code_characters[i];
602 		}
603 		ret = strbuf_addch(sb, c);
604 		if (ret)
605 			return ret;
606 	}
607 	return 0;
608 }
609 
/*
 * Decode the metric-id of every event in @perf_evlist back to its original
 * form and, for events whose name is just the parsed event string, replace the
 * name with a friendlier one derived from the metric-id plus @modifier.
 * Returns 0 or a negative error.
 */
static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
{
	struct evsel *ev;
	struct strbuf sb = STRBUF_INIT;
	char *cur;
	int ret = 0;

	evlist__for_each_entry(perf_evlist, ev) {
		if (!ev->metric_id)
			continue;

		/* Reuse one strbuf across events; reset it each iteration. */
		ret = strbuf_setlen(&sb, 0);
		if (ret)
			break;

		ret = decode_metric_id(&sb, ev->metric_id);
		if (ret)
			break;

		/* Replace the encoded id with the decoded copy. */
		free((char *)ev->metric_id);
		ev->metric_id = strdup(sb.buf);
		if (!ev->metric_id) {
			ret = -ENOMEM;
			break;
		}
		/*
		 * If the name is just the parsed event, use the metric-id to
		 * give a more friendly display version.
		 */
		if (strstr(ev->name, "metric-id=")) {
			bool has_slash = false;

			zfree(&ev->name);
			/* Rewrite msr@tsc@-style ids into msr/tsc/ form. */
			for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
				*cur = '/';
				has_slash = true;
			}

			if (modifier) {
				/* Add a ':' separator unless one form is already present. */
				if (!has_slash && !strchr(sb.buf, ':')) {
					ret = strbuf_addch(&sb, ':');
					if (ret)
						break;
				}
				ret = strbuf_addstr(&sb, modifier);
				if (ret)
					break;
			}
			ev->name = strdup(sb.buf);
			if (!ev->name) {
				ret = -ENOMEM;
				break;
			}
		}
	}
	strbuf_release(&sb);
	return ret;
}
668 
/**
 * metricgroup__build_event_string - Turn the IDs of a parsed metric expression
 * into a parse-events event string appended to @events.
 * @events: output buffer for the event string.
 * @ctx: parse context holding the IDs (event names) the metric references.
 * @modifier: if non-null, event modifiers like "u" appended to each event.
 * @group_events: should the events be wrapped in a "{}" group?
 *
 * Tool events are emitted last, outside any group. Returns 0 or a strbuf
 * error code.
 */
static int metricgroup__build_event_string(struct strbuf *events,
					   const struct expr_parse_ctx *ctx,
					   const char *modifier,
					   bool group_events)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool no_group = true, has_tool_events = false;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	int ret = 0;

#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)

	hashmap__for_each_entry(ctx->ids, cur, bkt) {
		const char *sep, *rsep, *id = cur->pkey;
		enum tool_pmu_event ev;

		pr_debug("found event %s\n", id);

		/* Always move tool events outside of the group. */
		ev = tool_pmu__str_to_event(id);
		if (ev != TOOL_PMU__EVENT_NONE) {
			has_tool_events = true;
			tool_events[ev] = true;
			continue;
		}
		/* Separate events with commas and open the group if necessary. */
		if (no_group) {
			if (group_events) {
				ret = strbuf_addch(events, '{');
				RETURN_IF_NON_ZERO(ret);
			}

			no_group = false;
		} else {
			ret = strbuf_addch(events, ',');
			RETURN_IF_NON_ZERO(ret);
		}
		/*
		 * Encode the ID as an event string. Add a qualifier for
		 * metric_id that is the original name except with characters
		 * that parse-events can't parse replaced. For example,
		 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
		 */
		sep = strchr(id, '@');
		if (sep != NULL) {
			/* pmu@terms@ form: emit pmu/terms,metric-id=.../ */
			ret = strbuf_add(events, id, sep - id);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addch(events, '/');
			RETURN_IF_NON_ZERO(ret);
			rsep = strrchr(sep, '@');
			ret = strbuf_add(events, sep + 1, rsep - sep - 1);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addstr(events, ",metric-id=");
			RETURN_IF_NON_ZERO(ret);
			sep = rsep;
		} else {
			/* Plain name, possibly with a ":modifier" suffix. */
			sep = strchr(id, ':');
			if (sep != NULL) {
				ret = strbuf_add(events, id, sep - id);
				RETURN_IF_NON_ZERO(ret);
			} else {
				ret = strbuf_addstr(events, id);
				RETURN_IF_NON_ZERO(ret);
			}
			ret = strbuf_addstr(events, "/metric-id=");
			RETURN_IF_NON_ZERO(ret);
		}
		ret = encode_metric_id(events, id);
		RETURN_IF_NON_ZERO(ret);
		ret = strbuf_addstr(events, "/");
		RETURN_IF_NON_ZERO(ret);

		if (sep != NULL) {
			/* Re-append what followed the '@'/':' separator. */
			ret = strbuf_addstr(events, sep + 1);
			RETURN_IF_NON_ZERO(ret);
		}
		if (modifier) {
			ret = strbuf_addstr(events, modifier);
			RETURN_IF_NON_ZERO(ret);
		}
	}
	/* Close the group; ":W" presumably marks a weak group — see parse-events. */
	if (!no_group && group_events) {
		ret = strbuf_addf(events, "}:W");
		RETURN_IF_NON_ZERO(ret);
	}
	if (has_tool_events) {
		int i;

		tool_pmu__for_each_event(i) {
			if (tool_events[i]) {
				if (!no_group) {
					ret = strbuf_addch(events, ',');
					RETURN_IF_NON_ZERO(ret);
				}
				no_group = false;
				ret = strbuf_addstr(events, tool_pmu__event_to_str(i));
				RETURN_IF_NON_ZERO(ret);
			}
		}
	}

	return ret;
#undef RETURN_IF_NON_ZERO
}
774 
/*
 * Weak default for the count of "?" runtime-parameter expansions of a metric
 * (see add_metric()); architectures may override.
 */
int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
{
	return 1;
}
779 
/*
 * A singly linked list on the stack of the names of metrics being
 * processed. Used to identify recursion.
 */
struct visited_metric {
	/* Metric name at this level of the add_metric() call chain. */
	const char *name;
	/* The caller's node, NULL at the root. */
	const struct visited_metric *parent;
};

/* Arguments bundled for the sys-metric add_metric() iteration callback. */
struct metricgroup_add_iter_data {
	/* List that resolved metrics are accumulated on. */
	struct list_head *metric_list;
	/* PMU name to match, or "all". */
	const char *pmu;
	/* Name of the metric or metric group being looked up. */
	const char *metric_name;
	/* Event modifier such as "u", or NULL. */
	const char *modifier;
	/* Out: error code for the caller — TODO confirm against callers. */
	int *ret;
	/* Out: presumably set when some metric matched; verify against caller. */
	bool *has_match;
	bool metric_no_group;
	bool metric_no_threshold;
	const char *user_requested_cpu_list;
	bool system_wide;
	/* Root of the metric tree being built, NULL when adding a root. */
	struct metric *root_metric;
	/* Recursion-detection chain. */
	const struct visited_metric *visited;
	const struct pmu_metrics_table *table;
};

/* Forward declarations: metric resolution and addition are mutually recursive. */
static bool metricgroup__find_metric(const char *pmu,
				     const char *metric,
				     const struct pmu_metrics_table *table,
				     struct pmu_metric *pm);

static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table);
820 
821 /**
822  * resolve_metric - Locate metrics within the root metric and recursively add
823  *                    references to them.
824  * @metric_list: The list the metric is added to.
825  * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
826  * @modifier: if non-null event modifiers like "u".
827  * @metric_no_group: Should events written to events be grouped "{}" or
828  *                   global. Grouping is the default but due to multiplexing the
829  *                   user may override.
830  * @user_requested_cpu_list: Command line specified CPUs to record on.
831  * @system_wide: Are events for all processes recorded.
832  * @root_metric: Metrics may reference other metrics to form a tree. In this
833  *               case the root_metric holds all the IDs and a list of referenced
834  *               metrics. When adding a root this argument is NULL.
835  * @visited: A singly linked list of metric names being added that is used to
836  *           detect recursion.
837  * @table: The table that is searched for metrics, most commonly the table for the
838  *       architecture perf is running upon.
839  */
840 static int resolve_metric(struct list_head *metric_list,
841 			  const char *pmu,
842 			  const char *modifier,
843 			  bool metric_no_group,
844 			  bool metric_no_threshold,
845 			  const char *user_requested_cpu_list,
846 			  bool system_wide,
847 			  struct metric *root_metric,
848 			  const struct visited_metric *visited,
849 			  const struct pmu_metrics_table *table)
850 {
851 	struct hashmap_entry *cur;
852 	size_t bkt;
853 	struct to_resolve {
854 		/* The metric to resolve. */
855 		struct pmu_metric pm;
856 		/*
857 		 * The key in the IDs map, this may differ from in case,
858 		 * etc. from pm->metric_name.
859 		 */
860 		const char *key;
861 	} *pending = NULL;
862 	int i, ret = 0, pending_cnt = 0;
863 
864 	/*
865 	 * Iterate all the parsed IDs and if there's a matching metric and it to
866 	 * the pending array.
867 	 */
868 	hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
869 		struct pmu_metric pm;
870 
871 		if (metricgroup__find_metric(pmu, cur->pkey, table, &pm)) {
872 			pending = realloc(pending,
873 					(pending_cnt + 1) * sizeof(struct to_resolve));
874 			if (!pending)
875 				return -ENOMEM;
876 
877 			memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
878 			pending[pending_cnt].key = cur->pkey;
879 			pending_cnt++;
880 		}
881 	}
882 
883 	/* Remove the metric IDs from the context. */
884 	for (i = 0; i < pending_cnt; i++)
885 		expr__del_id(root_metric->pctx, pending[i].key);
886 
887 	/*
888 	 * Recursively add all the metrics, IDs are added to the root metric's
889 	 * context.
890 	 */
891 	for (i = 0; i < pending_cnt; i++) {
892 		ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
893 				 metric_no_threshold, user_requested_cpu_list, system_wide,
894 				 root_metric, visited, table);
895 		if (ret)
896 			break;
897 	}
898 
899 	free(pending);
900 	return ret;
901 }
902 
903 /**
904  * __add_metric - Add a metric to metric_list.
905  * @metric_list: The list the metric is added to.
906  * @pm: The pmu_metric containing the metric to be added.
907  * @modifier: if non-null event modifiers like "u".
908  * @metric_no_group: Should events written to events be grouped "{}" or
909  *                   global. Grouping is the default but due to multiplexing the
910  *                   user may override.
911  * @metric_no_threshold: Should threshold expressions be ignored?
912  * @runtime: A special argument for the parser only known at runtime.
913  * @user_requested_cpu_list: Command line specified CPUs to record on.
914  * @system_wide: Are events for all processes recorded.
915  * @root_metric: Metrics may reference other metrics to form a tree. In this
916  *               case the root_metric holds all the IDs and a list of referenced
917  *               metrics. When adding a root this argument is NULL.
918  * @visited: A singly linked list of metric names being added that is used to
919  *           detect recursion.
920  * @table: The table that is searched for metrics, most commonly the table for the
921  *       architecture perf is running upon.
922  */
923 static int __add_metric(struct list_head *metric_list,
924 			const struct pmu_metric *pm,
925 			const char *modifier,
926 			bool metric_no_group,
927 			bool metric_no_threshold,
928 			int runtime,
929 			const char *user_requested_cpu_list,
930 			bool system_wide,
931 			struct metric *root_metric,
932 			const struct visited_metric *visited,
933 			const struct pmu_metrics_table *table)
934 {
935 	const struct visited_metric *vm;
936 	int ret;
937 	bool is_root = !root_metric;
938 	const char *expr;
939 	struct visited_metric visited_node = {
940 		.name = pm->metric_name,
941 		.parent = visited,
942 	};
943 
944 	for (vm = visited; vm; vm = vm->parent) {
945 		if (!strcmp(pm->metric_name, vm->name)) {
946 			pr_err("failed: recursion detected for %s\n", pm->metric_name);
947 			return -1;
948 		}
949 	}
950 
951 	if (is_root) {
952 		/*
953 		 * This metric is the root of a tree and may reference other
954 		 * metrics that are added recursively.
955 		 */
956 		root_metric = metric__new(pm, modifier, metric_no_group, runtime,
957 					  user_requested_cpu_list, system_wide);
958 		if (!root_metric)
959 			return -ENOMEM;
960 
961 	} else {
962 		int cnt = 0;
963 
964 		/*
965 		 * This metric was referenced in a metric higher in the
966 		 * tree. Check if the same metric is already resolved in the
967 		 * metric_refs list.
968 		 */
969 		if (root_metric->metric_refs) {
970 			for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
971 				if (!strcmp(pm->metric_name,
972 					    root_metric->metric_refs[cnt].metric_name))
973 					return 0;
974 			}
975 		}
976 
977 		/* Create reference. Need space for the entry and the terminator. */
978 		root_metric->metric_refs = realloc(root_metric->metric_refs,
979 						(cnt + 2) * sizeof(struct metric_ref));
980 		if (!root_metric->metric_refs)
981 			return -ENOMEM;
982 
983 		/*
984 		 * Intentionally passing just const char pointers,
985 		 * from 'pe' object, so they never go away. We don't
986 		 * need to change them, so there's no need to create
987 		 * our own copy.
988 		 */
989 		root_metric->metric_refs[cnt].metric_name = pm->metric_name;
990 		root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;
991 
992 		/* Null terminate array. */
993 		root_metric->metric_refs[cnt+1].metric_name = NULL;
994 		root_metric->metric_refs[cnt+1].metric_expr = NULL;
995 	}
996 
997 	/*
998 	 * For both the parent and referenced metrics, we parse
999 	 * all the metric's IDs and add it to the root context.
1000 	 */
1001 	ret = 0;
1002 	expr = pm->metric_expr;
1003 	if (is_root && pm->metric_threshold) {
1004 		/*
1005 		 * Threshold expressions are built off the actual metric. Switch
1006 		 * to use that in case of additional necessary events. Change
1007 		 * the visited node name to avoid this being flagged as
1008 		 * recursion. If the threshold events are disabled, just use the
1009 		 * metric's name as a reference. This allows metric threshold
1010 		 * computation if there are sufficient events.
1011 		 */
1012 		assert(strstr(pm->metric_threshold, pm->metric_name));
1013 		expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
1014 		visited_node.name = "__threshold__";
1015 	}
1016 	if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
1017 		/* Broken metric. */
1018 		ret = -EINVAL;
1019 	}
1020 	if (!ret) {
1021 		/* Resolve referenced metrics. */
1022 		const char *pmu = pm->pmu ?: "cpu";
1023 
1024 		ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
1025 				     metric_no_threshold, user_requested_cpu_list,
1026 				     system_wide, root_metric, &visited_node,
1027 				     table);
1028 	}
1029 	if (ret) {
1030 		if (is_root)
1031 			metric__free(root_metric);
1032 
1033 	} else if (is_root)
1034 		list_add(&root_metric->nd, metric_list);
1035 
1036 	return ret;
1037 }
1038 
/* Search state for metricgroup__find_metric_callback(). */
struct metricgroup__find_metric_data {
	/* PMU name to match, or "all" for any PMU. */
	const char *pmu;
	/* The metric name sought. */
	const char *metric;
	/* Out: filled with the found metric's pmu_metric. */
	struct pmu_metric *pm;
};
1044 
1045 static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
1046 					     const struct pmu_metrics_table *table  __maybe_unused,
1047 					     void *vdata)
1048 {
1049 	struct metricgroup__find_metric_data *data = vdata;
1050 	const char *pm_pmu = pm->pmu ?: "cpu";
1051 
1052 	if (strcmp(data->pmu, "all") && strcmp(pm_pmu, data->pmu))
1053 		return 0;
1054 
1055 	if (!match_metric(pm->metric_name, data->metric))
1056 		return 0;
1057 
1058 	memcpy(data->pm, pm, sizeof(*pm));
1059 	return 1;
1060 }
1061 
1062 static bool metricgroup__find_metric(const char *pmu,
1063 				     const char *metric,
1064 				     const struct pmu_metrics_table *table,
1065 				     struct pmu_metric *pm)
1066 {
1067 	struct metricgroup__find_metric_data data = {
1068 		.pmu = pmu,
1069 		.metric = metric,
1070 		.pm = pm,
1071 	};
1072 
1073 	return pmu_metrics_table__for_each_metric(table, metricgroup__find_metric_callback, &data)
1074 		? true : false;
1075 }
1076 
1077 static int add_metric(struct list_head *metric_list,
1078 		      const struct pmu_metric *pm,
1079 		      const char *modifier,
1080 		      bool metric_no_group,
1081 		      bool metric_no_threshold,
1082 		      const char *user_requested_cpu_list,
1083 		      bool system_wide,
1084 		      struct metric *root_metric,
1085 		      const struct visited_metric *visited,
1086 		      const struct pmu_metrics_table *table)
1087 {
1088 	int ret = 0;
1089 
1090 	pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);
1091 
1092 	if (!strstr(pm->metric_expr, "?")) {
1093 		ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1094 				   metric_no_threshold, 0, user_requested_cpu_list,
1095 				   system_wide, root_metric, visited, table);
1096 	} else {
1097 		int j, count;
1098 
1099 		count = arch_get_runtimeparam(pm);
1100 
1101 		/* This loop is added to create multiple
1102 		 * events depend on count value and add
1103 		 * those events to metric_list.
1104 		 */
1105 
1106 		for (j = 0; j < count && !ret; j++)
1107 			ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1108 					   metric_no_threshold, j, user_requested_cpu_list,
1109 					   system_wide, root_metric, visited, table);
1110 	}
1111 
1112 	return ret;
1113 }
1114 
1115 static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
1116 					const struct pmu_metrics_table *table __maybe_unused,
1117 					void *data)
1118 {
1119 	struct metricgroup_add_iter_data *d = data;
1120 	int ret;
1121 
1122 	if (!match_pm_metric(pm, d->pmu, d->metric_name))
1123 		return 0;
1124 
1125 	ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
1126 			 d->metric_no_threshold, d->user_requested_cpu_list,
1127 			 d->system_wide, d->root_metric, d->visited, d->table);
1128 	if (ret)
1129 		goto out;
1130 
1131 	*(d->has_match) = true;
1132 
1133 out:
1134 	*(d->ret) = ret;
1135 	return ret;
1136 }
1137 
1138 /**
1139  * metric_list_cmp - list_sort comparator that sorts metrics with more events to
1140  *                   the front. tool events are excluded from the count.
1141  */
1142 static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
1143 			   const struct list_head *r)
1144 {
1145 	const struct metric *left = container_of(l, struct metric, nd);
1146 	const struct metric *right = container_of(r, struct metric, nd);
1147 	struct expr_id_data *data;
1148 	int i, left_count, right_count;
1149 
1150 	left_count = hashmap__size(left->pctx->ids);
1151 	tool_pmu__for_each_event(i) {
1152 		if (!expr__get_id(left->pctx, tool_pmu__event_to_str(i), &data))
1153 			left_count--;
1154 	}
1155 
1156 	right_count = hashmap__size(right->pctx->ids);
1157 	tool_pmu__for_each_event(i) {
1158 		if (!expr__get_id(right->pctx, tool_pmu__event_to_str(i), &data))
1159 			right_count--;
1160 	}
1161 
1162 	return right_count - left_count;
1163 }
1164 
1165 /**
1166  * default_metricgroup_cmp - Implements complex key for the Default metricgroup
1167  *			     that first sorts by default_metricgroup_name, then
1168  *			     metric_name.
1169  */
1170 static int default_metricgroup_cmp(void *priv __maybe_unused,
1171 				   const struct list_head *l,
1172 				   const struct list_head *r)
1173 {
1174 	const struct metric *left = container_of(l, struct metric, nd);
1175 	const struct metric *right = container_of(r, struct metric, nd);
1176 	int diff = strcmp(right->default_metricgroup_name, left->default_metricgroup_name);
1177 
1178 	if (diff)
1179 		return diff;
1180 
1181 	return strcmp(right->metric_name, left->metric_name);
1182 }
1183 
/* State for metricgroup__add_metric_callback during a metrics-table walk. */
struct metricgroup__add_metric_data {
	/* List that matching metrics are added to. */
	struct list_head *list;
	/* PMU to restrict metrics to, or "all" for every PMU. */
	const char *pmu;
	/* Metric or metric-group name to match. */
	const char *metric_name;
	/* Optional event modifier string, e.g. "u", or NULL. */
	const char *modifier;
	/* Command line specified CPUs to record on, or NULL. */
	const char *user_requested_cpu_list;
	/* Should events be ungrouped (no "{}" weak group)? */
	bool metric_no_group;
	/* Should threshold expressions be ignored? */
	bool metric_no_threshold;
	/* Are events for all processes recorded? */
	bool system_wide;
	/* Out flag: set true when at least one metric matched. */
	bool has_match;
};
1195 
1196 static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
1197 					    const struct pmu_metrics_table *table,
1198 					    void *vdata)
1199 {
1200 	struct metricgroup__add_metric_data *data = vdata;
1201 	int ret = 0;
1202 
1203 	if (pm->metric_expr && match_pm_metric(pm, data->pmu, data->metric_name)) {
1204 		bool metric_no_group = data->metric_no_group ||
1205 			match_metric(pm->metricgroup_no_group, data->metric_name);
1206 
1207 		data->has_match = true;
1208 		ret = add_metric(data->list, pm, data->modifier, metric_no_group,
1209 				 data->metric_no_threshold, data->user_requested_cpu_list,
1210 				 data->system_wide, /*root_metric=*/NULL,
1211 				 /*visited_metrics=*/NULL, table);
1212 	}
1213 	return ret;
1214 }
1215 
1216 /**
1217  * metricgroup__add_metric - Find and add a metric, or a metric group.
1218  * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
1219  * @metric_name: The name of the metric or metric group. For example, "IPC"
1220  *               could be the name of a metric and "TopDownL1" the name of a
1221  *               metric group.
1222  * @modifier: if non-null event modifiers like "u".
1223  * @metric_no_group: Should events written to events be grouped "{}" or
1224  *                   global. Grouping is the default but due to multiplexing the
1225  *                   user may override.
1226  * @user_requested_cpu_list: Command line specified CPUs to record on.
1227  * @system_wide: Are events for all processes recorded.
1228  * @metric_list: The list that the metric or metric group are added to.
1229  * @table: The table that is searched for metrics, most commonly the table for the
1230  *       architecture perf is running upon.
1231  */
1232 static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
1233 				   bool metric_no_group, bool metric_no_threshold,
1234 				   const char *user_requested_cpu_list,
1235 				   bool system_wide,
1236 				   struct list_head *metric_list,
1237 				   const struct pmu_metrics_table *table)
1238 {
1239 	LIST_HEAD(list);
1240 	int ret;
1241 	bool has_match = false;
1242 
1243 	{
1244 		struct metricgroup__add_metric_data data = {
1245 			.list = &list,
1246 			.pmu = pmu,
1247 			.metric_name = metric_name,
1248 			.modifier = modifier,
1249 			.metric_no_group = metric_no_group,
1250 			.metric_no_threshold = metric_no_threshold,
1251 			.user_requested_cpu_list = user_requested_cpu_list,
1252 			.system_wide = system_wide,
1253 			.has_match = false,
1254 		};
1255 		/*
1256 		 * Iterate over all metrics seeing if metric matches either the
1257 		 * name or group. When it does add the metric to the list.
1258 		 */
1259 		ret = pmu_metrics_table__for_each_metric(table, metricgroup__add_metric_callback,
1260 						       &data);
1261 		if (ret)
1262 			goto out;
1263 
1264 		has_match = data.has_match;
1265 	}
1266 	{
1267 		struct metricgroup_iter_data data = {
1268 			.fn = metricgroup__add_metric_sys_event_iter,
1269 			.data = (void *) &(struct metricgroup_add_iter_data) {
1270 				.metric_list = &list,
1271 				.pmu = pmu,
1272 				.metric_name = metric_name,
1273 				.modifier = modifier,
1274 				.metric_no_group = metric_no_group,
1275 				.user_requested_cpu_list = user_requested_cpu_list,
1276 				.system_wide = system_wide,
1277 				.has_match = &has_match,
1278 				.ret = &ret,
1279 				.table = table,
1280 			},
1281 		};
1282 
1283 		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
1284 	}
1285 	/* End of pmu events. */
1286 	if (!has_match)
1287 		ret = -EINVAL;
1288 
1289 out:
1290 	/*
1291 	 * add to metric_list so that they can be released
1292 	 * even if it's failed
1293 	 */
1294 	list_splice(&list, metric_list);
1295 	return ret;
1296 }
1297 
1298 /**
1299  * metricgroup__add_metric_list - Find and add metrics, or metric groups,
1300  *                                specified in a list.
1301  * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
1302  * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
1303  *        would match the IPC and CPI metrics, and TopDownL1 would match all
1304  *        the metrics in the TopDownL1 group.
1305  * @metric_no_group: Should events written to events be grouped "{}" or
1306  *                   global. Grouping is the default but due to multiplexing the
1307  *                   user may override.
1308  * @user_requested_cpu_list: Command line specified CPUs to record on.
1309  * @system_wide: Are events for all processes recorded.
1310  * @metric_list: The list that metrics are added to.
1311  * @table: The table that is searched for metrics, most commonly the table for the
1312  *       architecture perf is running upon.
1313  */
1314 static int metricgroup__add_metric_list(const char *pmu, const char *list,
1315 					bool metric_no_group,
1316 					bool metric_no_threshold,
1317 					const char *user_requested_cpu_list,
1318 					bool system_wide, struct list_head *metric_list,
1319 					const struct pmu_metrics_table *table)
1320 {
1321 	char *list_itr, *list_copy, *metric_name, *modifier;
1322 	int ret, count = 0;
1323 
1324 	list_copy = strdup(list);
1325 	if (!list_copy)
1326 		return -ENOMEM;
1327 	list_itr = list_copy;
1328 
1329 	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
1330 		modifier = strchr(metric_name, ':');
1331 		if (modifier)
1332 			*modifier++ = '\0';
1333 
1334 		ret = metricgroup__add_metric(pmu, metric_name, modifier,
1335 					      metric_no_group, metric_no_threshold,
1336 					      user_requested_cpu_list,
1337 					      system_wide, metric_list, table);
1338 		if (ret == -EINVAL)
1339 			pr_err("Cannot find metric or group `%s'\n", metric_name);
1340 
1341 		if (ret)
1342 			break;
1343 
1344 		count++;
1345 	}
1346 	free(list_copy);
1347 
1348 	if (!ret) {
1349 		/*
1350 		 * Warn about nmi_watchdog if any parsed metrics had the
1351 		 * NO_NMI_WATCHDOG constraint.
1352 		 */
1353 		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
1354 		/* No metrics. */
1355 		if (count == 0)
1356 			return -EINVAL;
1357 	}
1358 	return ret;
1359 }
1360 
1361 static void metricgroup__free_metrics(struct list_head *metric_list)
1362 {
1363 	struct metric *m, *tmp;
1364 
1365 	list_for_each_entry_safe (m, tmp, metric_list, nd) {
1366 		list_del_init(&m->nd);
1367 		metric__free(m);
1368 	}
1369 }
1370 
1371 /**
1372  * find_tool_events - Search for the pressence of tool events in metric_list.
1373  * @metric_list: List to take metrics from.
1374  * @tool_events: Array of false values, indices corresponding to tool events set
1375  *               to true if tool event is found.
1376  */
1377 static void find_tool_events(const struct list_head *metric_list,
1378 			     bool tool_events[TOOL_PMU__EVENT_MAX])
1379 {
1380 	struct metric *m;
1381 
1382 	list_for_each_entry(m, metric_list, nd) {
1383 		int i;
1384 
1385 		tool_pmu__for_each_event(i) {
1386 			struct expr_id_data *data;
1387 
1388 			if (!tool_events[i] &&
1389 			    !expr__get_id(m->pctx, tool_pmu__event_to_str(i), &data))
1390 				tool_events[i] = true;
1391 		}
1392 	}
1393 }
1394 
1395 /**
1396  * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
1397  *                           metric IDs, as the IDs are held in a set,
1398  *                           duplicates will be removed.
1399  * @metric_list: List to take metrics from.
1400  * @combined: Out argument for result.
1401  */
1402 static int build_combined_expr_ctx(const struct list_head *metric_list,
1403 				   struct expr_parse_ctx **combined)
1404 {
1405 	struct hashmap_entry *cur;
1406 	size_t bkt;
1407 	struct metric *m;
1408 	char *dup;
1409 	int ret;
1410 
1411 	*combined = expr__ctx_new();
1412 	if (!*combined)
1413 		return -ENOMEM;
1414 
1415 	list_for_each_entry(m, metric_list, nd) {
1416 		if (!m->group_events && !m->modifier) {
1417 			hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
1418 				dup = strdup(cur->pkey);
1419 				if (!dup) {
1420 					ret = -ENOMEM;
1421 					goto err_out;
1422 				}
1423 				ret = expr__add_id(*combined, dup);
1424 				if (ret)
1425 					goto err_out;
1426 			}
1427 		}
1428 	}
1429 	return 0;
1430 err_out:
1431 	expr__ctx_free(*combined);
1432 	*combined = NULL;
1433 	return ret;
1434 }
1435 
1436 /**
1437  * parse_ids - Build the event string for the ids and parse them creating an
1438  *             evlist. The encoded metric_ids are decoded.
1439  * @metric_no_merge: is metric sharing explicitly disabled.
1440  * @fake_pmu: use a fake PMU when testing metrics not supported by the current CPU.
1441  * @ids: the event identifiers parsed from a metric.
1442  * @modifier: any modifiers added to the events.
1443  * @group_events: should events be placed in a weak group.
1444  * @tool_events: entries set true if the tool event of index could be present in
1445  *               the overall list of metrics.
1446  * @out_evlist: the created list of events.
1447  */
1448 static int parse_ids(bool metric_no_merge, bool fake_pmu,
1449 		     struct expr_parse_ctx *ids, const char *modifier,
1450 		     bool group_events, const bool tool_events[TOOL_PMU__EVENT_MAX],
1451 		     struct evlist **out_evlist)
1452 {
1453 	struct parse_events_error parse_error;
1454 	struct evlist *parsed_evlist;
1455 	struct strbuf events = STRBUF_INIT;
1456 	int ret;
1457 
1458 	*out_evlist = NULL;
1459 	if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
1460 		bool added_event = false;
1461 		int i;
1462 		/*
1463 		 * We may fail to share events between metrics because a tool
1464 		 * event isn't present in one metric. For example, a ratio of
1465 		 * cache misses doesn't need duration_time but the same events
1466 		 * may be used for a misses per second. Events without sharing
1467 		 * implies multiplexing, that is best avoided, so place
1468 		 * all tool events in every group.
1469 		 *
1470 		 * Also, there may be no ids/events in the expression parsing
1471 		 * context because of constant evaluation, e.g.:
1472 		 *    event1 if #smt_on else 0
1473 		 * Add a tool event to avoid a parse error on an empty string.
1474 		 */
1475 		tool_pmu__for_each_event(i) {
1476 			if (tool_events[i]) {
1477 				char *tmp = strdup(tool_pmu__event_to_str(i));
1478 
1479 				if (!tmp)
1480 					return -ENOMEM;
1481 				ids__insert(ids->ids, tmp);
1482 				added_event = true;
1483 			}
1484 		}
1485 		if (!added_event && hashmap__size(ids->ids) == 0) {
1486 			char *tmp = strdup("duration_time");
1487 
1488 			if (!tmp)
1489 				return -ENOMEM;
1490 			ids__insert(ids->ids, tmp);
1491 		}
1492 	}
1493 	ret = metricgroup__build_event_string(&events, ids, modifier,
1494 					      group_events);
1495 	if (ret)
1496 		return ret;
1497 
1498 	parsed_evlist = evlist__new();
1499 	if (!parsed_evlist) {
1500 		ret = -ENOMEM;
1501 		goto err_out;
1502 	}
1503 	pr_debug("Parsing metric events '%s'\n", events.buf);
1504 	parse_events_error__init(&parse_error);
1505 	ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
1506 			     &parse_error, fake_pmu, /*warn_if_reordered=*/false,
1507 			     /*fake_tp=*/false);
1508 	if (ret) {
1509 		parse_events_error__print(&parse_error, events.buf);
1510 		goto err_out;
1511 	}
1512 	ret = decode_all_metric_ids(parsed_evlist, modifier);
1513 	if (ret)
1514 		goto err_out;
1515 
1516 	*out_evlist = parsed_evlist;
1517 	parsed_evlist = NULL;
1518 err_out:
1519 	parse_events_error__exit(&parse_error);
1520 	evlist__delete(parsed_evlist);
1521 	strbuf_release(&events);
1522 	return ret;
1523 }
1524 
/*
 * parse_groups - Resolve the metrics/groups named in @str into a metric list,
 * create their events on @perf_evlist and record metric expressions in
 * @metric_events_list. Where possible, events are shared between metrics:
 * either via a combined evlist of all !group_events metrics, or by reusing an
 * earlier metric's evlist whose IDs are a superset of this metric's.
 */
static int parse_groups(struct evlist *perf_evlist,
			const char *pmu, const char *str,
			bool metric_no_group,
			bool metric_no_merge,
			bool metric_no_threshold,
			const char *user_requested_cpu_list,
			bool system_wide,
			bool fake_pmu,
			struct rblist *metric_events_list,
			const struct pmu_metrics_table *table)
{
	struct evlist *combined_evlist = NULL;
	LIST_HEAD(metric_list);
	struct metric *m;
	bool tool_events[TOOL_PMU__EVENT_MAX] = {false};
	bool is_default = !strcmp(str, "Default");
	int ret;

	if (metric_events_list->nr_entries == 0)
		metricgroup__rblist_init(metric_events_list);
	ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
					   user_requested_cpu_list,
					   system_wide, &metric_list, table);
	if (ret)
		goto out;

	/* Sort metrics from largest to smallest. */
	list_sort(NULL, &metric_list, metric_list_cmp);

	if (!metric_no_merge) {
		struct expr_parse_ctx *combined = NULL;

		find_tool_events(&metric_list, tool_events);

		/* Merge the IDs of all shareable metrics into one context. */
		ret = build_combined_expr_ctx(&metric_list, &combined);

		if (!ret && combined && hashmap__size(combined->ids)) {
			ret = parse_ids(metric_no_merge, fake_pmu, combined,
					/*modifier=*/NULL,
					/*group_events=*/false,
					tool_events,
					&combined_evlist);
		}
		if (combined)
			expr__ctx_free(combined);

		if (ret)
			goto out;
	}

	if (is_default)
		list_sort(NULL, &metric_list, default_metricgroup_cmp);

	list_for_each_entry(m, &metric_list, nd) {
		struct metric_event *me;
		struct evsel **metric_events;
		struct evlist *metric_evlist = NULL;
		struct metric *n;
		struct metric_expr *expr;

		if (combined_evlist && !m->group_events) {
			/* Shareable metric: use the combined event list. */
			metric_evlist = combined_evlist;
		} else if (!metric_no_merge) {
			/*
			 * See if the IDs for this metric are a subset of an
			 * earlier metric.
			 */
			list_for_each_entry(n, &metric_list, nd) {
				if (m == n)
					break;

				if (n->evlist == NULL)
					continue;

				/* Modifiers must match for events to be shared. */
				if ((!m->modifier && n->modifier) ||
				    (m->modifier && !n->modifier) ||
				    (m->modifier && n->modifier &&
					    strcmp(m->modifier, n->modifier)))
					continue;

				/* PMUs must match for events to be shared. */
				if ((!m->pmu && n->pmu) ||
				    (m->pmu && !n->pmu) ||
				    (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
					continue;

				if (expr__subset_of_ids(n->pctx, m->pctx)) {
					pr_debug("Events in '%s' fully contained within '%s'\n",
						 m->metric_name, n->metric_name);
					metric_evlist = n->evlist;
					break;
				}

			}
		}
		if (!metric_evlist) {
			/* No sharing possible: parse this metric's own events. */
			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
					m->group_events, tool_events, &m->evlist);
			if (ret)
				goto out;

			metric_evlist = m->evlist;
		}
		ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
					  metric_evlist, &metric_events);
		if (ret) {
			pr_err("Cannot resolve IDs for %s: %s\n",
				m->metric_name, m->metric_expr);
			goto out;
		}

		me = metricgroup__lookup(metric_events_list, metric_events[0], true);

		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}

		/* Transfer ownership of the metric references to expr. */
		expr->metric_refs = m->metric_refs;
		m->metric_refs = NULL;
		expr->metric_expr = m->metric_expr;
		if (m->modifier) {
			char *tmp;

			/* Record the modifier in the name, e.g. "IPC:u". */
			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
				expr->metric_name = NULL;
			else
				expr->metric_name = tmp;
		} else
			expr->metric_name = strdup(m->metric_name);

		if (!expr->metric_name) {
			ret = -ENOMEM;
			free(metric_events);
			goto out;
		}
		expr->metric_threshold = m->metric_threshold;
		expr->metric_unit = m->metric_unit;
		expr->metric_events = metric_events;
		expr->runtime = m->pctx->sctx.runtime;
		expr->default_metricgroup_name = m->default_metricgroup_name;
		me->is_default = is_default;
		list_add(&expr->nd, &me->head);
	}


	if (combined_evlist) {
		/* Move the shared events onto the caller's evlist. */
		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
		evlist__delete(combined_evlist);
	}

	list_for_each_entry(m, &metric_list, nd) {
		if (m->evlist)
			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
	}

out:
	metricgroup__free_metrics(&metric_list);
	return ret;
}
1686 
/*
 * metricgroup__parse_groups - Public entry point: parse the metrics in @str
 * using the metrics table for the current architecture and add their events
 * to @perf_evlist. Returns -EINVAL when no metrics table exists.
 */
int metricgroup__parse_groups(struct evlist *perf_evlist,
			      const char *pmu,
			      const char *str,
			      bool metric_no_group,
			      bool metric_no_merge,
			      bool metric_no_threshold,
			      const char *user_requested_cpu_list,
			      bool system_wide,
			      bool hardware_aware_grouping,
			      struct rblist *metric_events)
{
	const struct pmu_metrics_table *table = pmu_metrics_table__find();

	if (!table)
		return -EINVAL;
	/* NOTE(review): only logged here; grouping behavior is unchanged. */
	if (hardware_aware_grouping)
		pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n");

	return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
			    metric_no_threshold, user_requested_cpu_list, system_wide,
			    /*fake_pmu=*/false, metric_events, table);
}
1709 
/*
 * metricgroup__parse_groups_test - Testing variant of metric parsing: uses a
 * caller-supplied metrics table, matches any PMU and a fake PMU so metrics
 * not supported by the current CPU can still be parsed.
 */
int metricgroup__parse_groups_test(struct evlist *evlist,
				   const struct pmu_metrics_table *table,
				   const char *str,
				   struct rblist *metric_events)
{
	return parse_groups(evlist, "all", str,
			    /*metric_no_group=*/false,
			    /*metric_no_merge=*/false,
			    /*metric_no_threshold=*/false,
			    /*user_requested_cpu_list=*/NULL,
			    /*system_wide=*/false,
			    /*fake_pmu=*/true, metric_events, table);
}
1723 
/* State for metricgroup__has_metric_callback during a metrics-table walk. */
struct metricgroup__has_metric_data {
	/* PMU name the metric must belong to. */
	const char *pmu;
	/* Metric or metric-group name to look for. */
	const char *metric;
};
1728 static int metricgroup__has_metric_callback(const struct pmu_metric *pm,
1729 					    const struct pmu_metrics_table *table __maybe_unused,
1730 					    void *vdata)
1731 {
1732 	struct metricgroup__has_metric_data *data = vdata;
1733 
1734 	return match_pm_metric(pm, data->pmu, data->metric) ? 1 : 0;
1735 }
1736 
1737 bool metricgroup__has_metric(const char *pmu, const char *metric)
1738 {
1739 	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1740 	struct metricgroup__has_metric_data data = {
1741 		.pmu = pmu,
1742 		.metric = metric,
1743 	};
1744 
1745 	if (!table)
1746 		return false;
1747 
1748 	return pmu_metrics_table__for_each_metric(table, metricgroup__has_metric_callback, &data)
1749 		? true : false;
1750 }
1751 
1752 static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
1753 					    const struct pmu_metrics_table *table __maybe_unused,
1754 					    void *data)
1755 {
1756 	unsigned int *max_level = data;
1757 	unsigned int level;
1758 	const char *p = strstr(pm->metric_group ?: "", "TopdownL");
1759 
1760 	if (!p || p[8] == '\0')
1761 		return 0;
1762 
1763 	level = p[8] - '0';
1764 	if (level > *max_level)
1765 		*max_level = level;
1766 
1767 	return 0;
1768 }
1769 
1770 unsigned int metricgroups__topdown_max_level(void)
1771 {
1772 	unsigned int max_level = 0;
1773 	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1774 
1775 	if (!table)
1776 		return false;
1777 
1778 	pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
1779 					  &max_level);
1780 	return max_level;
1781 }
1782 
1783 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1784 				    struct rblist *new_metric_events,
1785 				    struct rblist *old_metric_events)
1786 {
1787 	unsigned int i;
1788 
1789 	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1790 		struct rb_node *nd;
1791 		struct metric_event *old_me, *new_me;
1792 		struct metric_expr *old_expr, *new_expr;
1793 		struct evsel *evsel;
1794 		size_t alloc_size;
1795 		int idx, nr;
1796 
1797 		nd = rblist__entry(old_metric_events, i);
1798 		old_me = container_of(nd, struct metric_event, nd);
1799 
1800 		evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1801 		if (!evsel)
1802 			return -EINVAL;
1803 		new_me = metricgroup__lookup(new_metric_events, evsel, true);
1804 		if (!new_me)
1805 			return -ENOMEM;
1806 
1807 		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1808 			 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1809 
1810 		list_for_each_entry(old_expr, &old_me->head, nd) {
1811 			new_expr = malloc(sizeof(*new_expr));
1812 			if (!new_expr)
1813 				return -ENOMEM;
1814 
1815 			new_expr->metric_expr = old_expr->metric_expr;
1816 			new_expr->metric_threshold = old_expr->metric_threshold;
1817 			new_expr->metric_name = strdup(old_expr->metric_name);
1818 			if (!new_expr->metric_name)
1819 				return -ENOMEM;
1820 
1821 			new_expr->metric_unit = old_expr->metric_unit;
1822 			new_expr->runtime = old_expr->runtime;
1823 
1824 			if (old_expr->metric_refs) {
1825 				/* calculate number of metric_events */
1826 				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1827 					continue;
1828 				alloc_size = sizeof(*new_expr->metric_refs);
1829 				new_expr->metric_refs = calloc(nr + 1, alloc_size);
1830 				if (!new_expr->metric_refs) {
1831 					free(new_expr);
1832 					return -ENOMEM;
1833 				}
1834 
1835 				memcpy(new_expr->metric_refs, old_expr->metric_refs,
1836 				       nr * alloc_size);
1837 			} else {
1838 				new_expr->metric_refs = NULL;
1839 			}
1840 
1841 			/* calculate number of metric_events */
1842 			for (nr = 0; old_expr->metric_events[nr]; nr++)
1843 				continue;
1844 			alloc_size = sizeof(*new_expr->metric_events);
1845 			new_expr->metric_events = calloc(nr + 1, alloc_size);
1846 			if (!new_expr->metric_events) {
1847 				zfree(&new_expr->metric_refs);
1848 				free(new_expr);
1849 				return -ENOMEM;
1850 			}
1851 
1852 			/* copy evsel in the same position */
1853 			for (idx = 0; idx < nr; idx++) {
1854 				evsel = old_expr->metric_events[idx];
1855 				evsel = evlist__find_evsel(evlist, evsel->core.idx);
1856 				if (evsel == NULL) {
1857 					zfree(&new_expr->metric_events);
1858 					zfree(&new_expr->metric_refs);
1859 					free(new_expr);
1860 					return -EINVAL;
1861 				}
1862 				new_expr->metric_events[idx] = evsel;
1863 			}
1864 
1865 			list_add(&new_expr->nd, &new_me->head);
1866 		}
1867 	}
1868 	return 0;
1869 }
1870