xref: /linux/tools/perf/util/metricgroup.c (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2017, Intel Corporation.
4  */
5 
6 /* Manage metrics and groups of metrics from JSON files */
7 
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "pmus.h"
15 #include "print-events.h"
16 #include "smt.h"
17 #include "expr.h"
18 #include "rblist.h"
19 #include <string.h>
20 #include <errno.h>
21 #include "strlist.h"
22 #include <assert.h>
23 #include <linux/ctype.h>
24 #include <linux/list_sort.h>
25 #include <linux/string.h>
26 #include <linux/zalloc.h>
27 #include <perf/cpumap.h>
28 #include <subcmd/parse-options.h>
29 #include <api/fs/fs.h>
30 #include "util.h"
31 #include <asm/bug.h>
32 #include "cgroup.h"
33 #include "util/hashmap.h"
34 
35 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
36 					 struct evsel *evsel,
37 					 bool create)
38 {
39 	struct rb_node *nd;
40 	struct metric_event me = {
41 		.evsel = evsel
42 	};
43 
44 	if (!metric_events)
45 		return NULL;
46 
47 	if (evsel && evsel->metric_leader)
48 		me.evsel = evsel->metric_leader;
49 	nd = rblist__find(metric_events, &me);
50 	if (nd)
51 		return container_of(nd, struct metric_event, nd);
52 	if (create) {
53 		rblist__add_node(metric_events, &me);
54 		nd = rblist__find(metric_events, &me);
55 		if (nd)
56 			return container_of(nd, struct metric_event, nd);
57 	}
58 	return NULL;
59 }
60 
61 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
62 {
63 	struct metric_event *a = container_of(rb_node,
64 					      struct metric_event,
65 					      nd);
66 	const struct metric_event *b = entry;
67 
68 	if (a->evsel == b->evsel)
69 		return 0;
70 	if ((char *)a->evsel < (char *)b->evsel)
71 		return -1;
72 	return +1;
73 }
74 
75 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
76 					const void *entry)
77 {
78 	struct metric_event *me = malloc(sizeof(struct metric_event));
79 
80 	if (!me)
81 		return NULL;
82 	memcpy(me, entry, sizeof(struct metric_event));
83 	me->evsel = ((struct metric_event *)entry)->evsel;
84 	me->is_default = false;
85 	INIT_LIST_HEAD(&me->head);
86 	return &me->nd;
87 }
88 
89 static void metric_event_delete(struct rblist *rblist __maybe_unused,
90 				struct rb_node *rb_node)
91 {
92 	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
93 	struct metric_expr *expr, *tmp;
94 
95 	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
96 		zfree(&expr->metric_name);
97 		zfree(&expr->metric_refs);
98 		zfree(&expr->metric_events);
99 		free(expr);
100 	}
101 
102 	free(me);
103 }
104 
105 static void metricgroup__rblist_init(struct rblist *metric_events)
106 {
107 	rblist__init(metric_events);
108 	metric_events->node_cmp = metric_event_cmp;
109 	metric_events->node_new = metric_event_new;
110 	metric_events->node_delete = metric_event_delete;
111 }
112 
/* Tear down the rblist; each node is released via metric_event_delete. */
void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}
117 
/**
 * The metric under construction. The data held here will be placed in a
 * metric_expr.
 */
struct metric {
	struct list_head nd;
	/**
	 * The expression parse context importantly holding the IDs contained
	 * within the expression.
	 */
	struct expr_parse_ctx *pctx;
	/** The PMU the metric is for; "cpu" when the table entry has no PMU. */
	const char *pmu;
	/** The name of the metric such as "IPC". */
	const char *metric_name;
	/** Modifier on the metric such as "u" or NULL for none. */
	const char *modifier;
	/** The expression to parse, for example, "instructions/cycles". */
	const char *metric_expr;
	/** Optional threshold expression where zero value is green, otherwise red. */
	const char *metric_threshold;
	/**
	 * The "ScaleUnit" that scales and adds a unit to the metric during
	 * output.
	 */
	const char *metric_unit;
	/**
	 * Optional name of the metric group reported
	 * if the Default metric group is being processed.
	 */
	const char *default_metricgroup_name;
	/** Optional null terminated array of referenced metrics. */
	struct metric_ref *metric_refs;
	/**
	 * Should events of the metric be grouped?
	 */
	bool group_events;
	/**
	 * Parsed events for the metric. Optional as events may be taken from a
	 * different metric whose group contains all the IDs necessary for this
	 * one.
	 */
	struct evlist *evlist;
};
161 
/*
 * Warn about metrics whose events weren't grouped because of the NMI
 * watchdog. Called with foot=false per affected metric, then once with
 * foot=true to print the remediation steps (only if any metric was hit).
 */
static void metric__watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (foot) {
		if (violate_nmi_constraint)
			pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
				   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
				   "    perf stat ...\n"
				   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
		return;
	}

	pr_warning("Not grouping metric %s's events.\n", name);
	violate_nmi_constraint = true;
}
180 
181 static bool metric__group_events(const struct pmu_metric *pm)
182 {
183 	switch (pm->event_grouping) {
184 	case MetricNoGroupEvents:
185 		return false;
186 	case MetricNoGroupEventsNmi:
187 		if (!sysctl__nmi_watchdog_enabled())
188 			return true;
189 		metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
190 		return false;
191 	case MetricNoGroupEventsSmt:
192 		return !smt_on();
193 	case MetricGroupEvents:
194 	default:
195 		return true;
196 	}
197 }
198 
199 static void metric__free(struct metric *m)
200 {
201 	if (!m)
202 		return;
203 
204 	zfree(&m->metric_refs);
205 	expr__ctx_free(m->pctx);
206 	zfree(&m->modifier);
207 	evlist__delete(m->evlist);
208 	free(m);
209 }
210 
211 static struct metric *metric__new(const struct pmu_metric *pm,
212 				  const char *modifier,
213 				  bool metric_no_group,
214 				  int runtime,
215 				  const char *user_requested_cpu_list,
216 				  bool system_wide)
217 {
218 	struct metric *m;
219 
220 	m = zalloc(sizeof(*m));
221 	if (!m)
222 		return NULL;
223 
224 	m->pctx = expr__ctx_new();
225 	if (!m->pctx)
226 		goto out_err;
227 
228 	m->pmu = pm->pmu ?: "cpu";
229 	m->metric_name = pm->metric_name;
230 	m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
231 	m->modifier = NULL;
232 	if (modifier) {
233 		m->modifier = strdup(modifier);
234 		if (!m->modifier)
235 			goto out_err;
236 	}
237 	m->metric_expr = pm->metric_expr;
238 	m->metric_threshold = pm->metric_threshold;
239 	m->metric_unit = pm->unit;
240 	m->pctx->sctx.user_requested_cpu_list = NULL;
241 	if (user_requested_cpu_list) {
242 		m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
243 		if (!m->pctx->sctx.user_requested_cpu_list)
244 			goto out_err;
245 	}
246 	m->pctx->sctx.runtime = runtime;
247 	m->pctx->sctx.system_wide = system_wide;
248 	m->group_events = !metric_no_group && metric__group_events(pm);
249 	m->metric_refs = NULL;
250 	m->evlist = NULL;
251 
252 	return m;
253 out_err:
254 	metric__free(m);
255 	return NULL;
256 }
257 
/* Is an event with this metric-id already present in the first num_events slots? */
static bool contains_metric_id(struct evsel **metric_events, int num_events,
			       const char *metric_id)
{
	for (int idx = 0; idx < num_events; idx++) {
		if (strcmp(evsel__metric_id(metric_events[idx]), metric_id) == 0)
			return true;
	}
	return false;
}
269 
/**
 * setup_metric_events - Find a group of events in metric_evlist that correspond
 *                       to the IDs from a parsed metric expression.
 * @pmu: The PMU for the IDs.
 * @ids: the metric IDs to match.
 * @metric_evlist: the list of perf events.
 * @out_metric_events: holds the created metric events array.
 *
 * Return: 0 on success, with *out_metric_events set to an owned,
 * NULL-terminated array of matched evsels; -ENOMEM on allocation failure;
 * -EINVAL when not every ID could be matched to an event.
 */
static int setup_metric_events(const char *pmu, struct hashmap *ids,
			       struct evlist *metric_evlist,
			       struct evsel ***out_metric_events)
{
	struct evsel **metric_events;
	const char *metric_id;
	struct evsel *ev;
	size_t ids_size, matched_events, i;
	bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);

	*out_metric_events = NULL;
	ids_size = hashmap__size(ids);

	/* +1 slot so the returned array is NULL terminated. */
	metric_events = calloc(ids_size + 1, sizeof(void *));
	if (!metric_events)
		return -ENOMEM;

	matched_events = 0;
	evlist__for_each_entry(metric_evlist, ev) {
		struct expr_id_data *val_ptr;

		/* Don't match events for the wrong hybrid PMU. */
		if (!all_pmus && ev->pmu_name && evsel__is_hybrid(ev) &&
		    strcmp(ev->pmu_name, pmu))
			continue;
		/*
		 * Check for duplicate events with the same name. For
		 * example, uncore_imc/cas_count_read/ will turn into 6
		 * events per socket on skylakex. Only the first such
		 * event is placed in metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		if (contains_metric_id(metric_events, matched_events, metric_id))
			continue;
		/*
		 * Does this event belong to the parse context? For
		 * combined or shared groups, this metric may not care
		 * about this event.
		 */
		if (hashmap__find(ids, metric_id, &val_ptr)) {
			pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
			metric_events[matched_events++] = ev;

			/* Every ID matched: no need to scan further events. */
			if (matched_events >= ids_size)
				break;
		}
	}
	if (matched_events < ids_size) {
		/* At least one metric ID has no corresponding event. */
		free(metric_events);
		return -EINVAL;
	}
	for (i = 0; i < ids_size; i++) {
		ev = metric_events[i];
		ev->collect_stat = true;

		/*
		 * The metric leader points to the identically named
		 * event in metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same
		 * group (or globally) as being in use as uncore events
		 * may be duplicated for each pmu. Set the metric leader
		 * of such events to be the event that appears in
		 * metric_events.
		 */
		metric_id = evsel__metric_id(ev);
		evlist__for_each_entry_continue(metric_evlist, ev) {
			if (!strcmp(evsel__metric_id(ev), metric_id))
				ev->metric_leader = metric_events[i];
		}
	}
	*out_metric_events = metric_events;
	return 0;
}
354 
/*
 * Does @sought name one of the ';'-separated entries in @metric_or_groups?
 * "all" matches anything; a NULL list only matches the special "No_group".
 * Comparisons are case-insensitive.
 */
static bool match_metric(const char *metric_or_groups, const char *sought)
{
	size_t len;

	if (!sought)
		return false;
	if (!strcmp(sought, "all"))
		return true;
	if (!metric_or_groups)
		return !strcasecmp(sought, "No_group");

	/* Iteratively walk the ';'-separated list (originally recursive). */
	len = strlen(sought);
	while (metric_or_groups) {
		if (!strncasecmp(metric_or_groups, sought, len) &&
		    (metric_or_groups[len] == '\0' || metric_or_groups[len] == ';'))
			return true;
		metric_or_groups = strchr(metric_or_groups, ';');
		if (metric_or_groups)
			metric_or_groups++;
	}
	return false;
}
373 
374 static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric)
375 {
376 	const char *pm_pmu = pm->pmu ?: "cpu";
377 
378 	if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
379 		return false;
380 
381 	return match_metric(pm->metric_group, metric) ||
382 	       match_metric(pm->metric_name, metric);
383 }
384 
/** struct mep - RB-tree node for building printing information. */
struct mep {
	/** nd - RB-tree element. */
	struct rb_node nd;
	/** @metric_group: Owned metric group name, separated others with ';'. */
	char *metric_group;
	/* The remaining strings are borrowed (not freed by mep_delete). */
	const char *metric_name;
	const char *metric_desc;
	const char *metric_long_desc;
	const char *metric_expr;
	const char *metric_threshold;
	const char *metric_unit;
};
398 
399 static int mep_cmp(struct rb_node *rb_node, const void *entry)
400 {
401 	struct mep *a = container_of(rb_node, struct mep, nd);
402 	struct mep *b = (struct mep *)entry;
403 	int ret;
404 
405 	ret = strcmp(a->metric_group, b->metric_group);
406 	if (ret)
407 		return ret;
408 
409 	return strcmp(a->metric_name, b->metric_name);
410 }
411 
412 static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
413 {
414 	struct mep *me = malloc(sizeof(struct mep));
415 
416 	if (!me)
417 		return NULL;
418 
419 	memcpy(me, entry, sizeof(struct mep));
420 	return &me->nd;
421 }
422 
423 static void mep_delete(struct rblist *rl __maybe_unused,
424 		       struct rb_node *nd)
425 {
426 	struct mep *me = container_of(nd, struct mep, nd);
427 
428 	zfree(&me->metric_group);
429 	free(me);
430 }
431 
432 static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
433 			      const char *metric_name)
434 {
435 	struct rb_node *nd;
436 	struct mep me = {
437 		.metric_group = strdup(metric_group),
438 		.metric_name = metric_name,
439 	};
440 	nd = rblist__find(groups, &me);
441 	if (nd) {
442 		free(me.metric_group);
443 		return container_of(nd, struct mep, nd);
444 	}
445 	rblist__add_node(groups, &me);
446 	nd = rblist__find(groups, &me);
447 	if (nd)
448 		return container_of(nd, struct mep, nd);
449 	return NULL;
450 }
451 
452 static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
453 					struct rblist *groups)
454 {
455 	const char *g;
456 	char *omg, *mg;
457 
458 	mg = strdup(pm->metric_group ?: pm->metric_name);
459 	if (!mg)
460 		return -ENOMEM;
461 	omg = mg;
462 	while ((g = strsep(&mg, ";")) != NULL) {
463 		struct mep *me;
464 
465 		g = skip_spaces(g);
466 		if (strlen(g))
467 			me = mep_lookup(groups, g, pm->metric_name);
468 		else
469 			me = mep_lookup(groups, pm->metric_name, pm->metric_name);
470 
471 		if (me) {
472 			me->metric_desc = pm->desc;
473 			me->metric_long_desc = pm->long_desc;
474 			me->metric_expr = pm->metric_expr;
475 			me->metric_threshold = pm->metric_threshold;
476 			me->metric_unit = pm->unit;
477 		}
478 	}
479 	free(omg);
480 
481 	return 0;
482 }
483 
/* Callback plus its opaque data, forwarded while iterating system metrics. */
struct metricgroup_iter_data {
	pmu_metric_iter_fn fn;
	void *data;
};
488 
489 static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
490 				       const struct pmu_metrics_table *table,
491 				       void *data)
492 {
493 	struct metricgroup_iter_data *d = data;
494 	struct perf_pmu *pmu = NULL;
495 
496 	if (!pm->metric_expr || !pm->compat)
497 		return 0;
498 
499 	while ((pmu = perf_pmus__scan(pmu))) {
500 
501 		if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
502 			continue;
503 
504 		return d->fn(pm, table, d->data);
505 	}
506 	return 0;
507 }
508 
/* pmu_metric_iter_fn adapter: file each table metric into the mep rblist. */
static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
					const struct pmu_metrics_table *table __maybe_unused,
					void *vdata)
{
	struct rblist *groups = vdata;

	return metricgroup__add_to_mep_groups(pm, groups);
}
517 
/*
 * Print every known metric via print_cb->print_metric, sorted by
 * (metric group, metric name) courtesy of the rblist ordering.
 */
void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
{
	struct rblist groups;
	const struct pmu_metrics_table *table;
	struct rb_node *node, *next;

	/* Collect metrics into an rblist keyed on (group, name). */
	rblist__init(&groups);
	groups.node_new = mep_new;
	groups.node_cmp = mep_cmp;
	groups.node_delete = mep_delete;
	table = pmu_metrics_table__find();
	if (table) {
		pmu_metrics_table__for_each_metric(table,
						 metricgroup__add_to_mep_groups_callback,
						 &groups);
	}
	{
		/* Also gather system (uncore) metrics matching this machine. */
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_to_mep_groups_callback,
			.data = &groups,
		};
		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
	}

	/* In-order walk, freeing nodes as we go. */
	for (node = rb_first_cached(&groups.entries); node; node = next) {
		struct mep *me = container_of(node, struct mep, nd);

		print_cb->print_metric(print_state,
				me->metric_group,
				me->metric_name,
				me->metric_desc,
				me->metric_long_desc,
				me->metric_expr,
				me->metric_threshold,
				me->metric_unit);
		/* Grab the successor before removing (and freeing) this node. */
		next = rb_next(node);
		rblist__remove_node(&groups, node);
	}
}
557 
/* Characters a metric ID may contain that parse-events can't; each is encoded as "!<index>". */
static const char *code_characters = ",-=@";
559 
560 static int encode_metric_id(struct strbuf *sb, const char *x)
561 {
562 	char *c;
563 	int ret = 0;
564 
565 	for (; *x; x++) {
566 		c = strchr(code_characters, *x);
567 		if (c) {
568 			ret = strbuf_addch(sb, '!');
569 			if (ret)
570 				break;
571 
572 			ret = strbuf_addch(sb, '0' + (c - code_characters));
573 			if (ret)
574 				break;
575 		} else {
576 			ret = strbuf_addch(sb, *x);
577 			if (ret)
578 				break;
579 		}
580 	}
581 	return ret;
582 }
583 
584 static int decode_metric_id(struct strbuf *sb, const char *x)
585 {
586 	const char *orig = x;
587 	size_t i;
588 	char c;
589 	int ret;
590 
591 	for (; *x; x++) {
592 		c = *x;
593 		if (*x == '!') {
594 			x++;
595 			i = *x - '0';
596 			if (i > strlen(code_characters)) {
597 				pr_err("Bad metric-id encoding in: '%s'", orig);
598 				return -1;
599 			}
600 			c = code_characters[i];
601 		}
602 		ret = strbuf_addch(sb, c);
603 		if (ret)
604 			return ret;
605 	}
606 	return 0;
607 }
608 
/*
 * Decode every evsel's encoded metric-id back to its original form. When the
 * event name is just the parsed event string (contains "metric-id="), replace
 * it with a friendlier name derived from the decoded id.
 * @modifier: optional modifier string (e.g. "u") appended to rebuilt names.
 */
static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
{
	struct evsel *ev;
	struct strbuf sb = STRBUF_INIT;
	char *cur;
	int ret = 0;

	evlist__for_each_entry(perf_evlist, ev) {
		if (!ev->metric_id)
			continue;

		/* Reuse the same strbuf for each event. */
		ret = strbuf_setlen(&sb, 0);
		if (ret)
			break;

		ret = decode_metric_id(&sb, ev->metric_id);
		if (ret)
			break;

		free((char *)ev->metric_id);
		ev->metric_id = strdup(sb.buf);
		if (!ev->metric_id) {
			ret = -ENOMEM;
			break;
		}
		/*
		 * If the name is just the parsed event, use the metric-id to
		 * give a more friendly display version.
		 */
		if (strstr(ev->name, "metric-id=")) {
			bool has_slash = false;

			zfree(&ev->name);
			/* Turn 'msr@tsc@'-style '@'s back into '/' separators. */
			for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
				*cur = '/';
				has_slash = true;
			}

			if (modifier) {
				/* Bare event names need ':' before the modifier. */
				if (!has_slash && !strchr(sb.buf, ':')) {
					ret = strbuf_addch(&sb, ':');
					if (ret)
						break;
				}
				ret = strbuf_addstr(&sb, modifier);
				if (ret)
					break;
			}
			ev->name = strdup(sb.buf);
			if (!ev->name) {
				ret = -ENOMEM;
				break;
			}
		}
	}
	strbuf_release(&sb);
	return ret;
}
667 
/**
 * metricgroup__build_event_string - Build a parse-events string from the IDs
 * in @ctx, e.g. "{event1/metric-id=event1/,event2/metric-id=!0.../}:W".
 * @events: output strbuf the event string is appended to.
 * @ctx: the parse context whose IDs become events.
 * @modifier: if non-null event modifiers like "u".
 * @group_events: should the events be wrapped in a "{}" group (with ":W")?
 *
 * Tool events (e.g. duration_time) are always emitted outside the group.
 */
static int metricgroup__build_event_string(struct strbuf *events,
					   const struct expr_parse_ctx *ctx,
					   const char *modifier,
					   bool group_events)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool no_group = true, has_tool_events = false;
	bool tool_events[PERF_TOOL_MAX] = {false};
	int ret = 0;

#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)

	hashmap__for_each_entry(ctx->ids, cur, bkt) {
		const char *sep, *rsep, *id = cur->pkey;
		enum perf_tool_event ev;

		pr_debug("found event %s\n", id);

		/* Always move tool events outside of the group. */
		ev = perf_tool_event__from_str(id);
		if (ev != PERF_TOOL_NONE) {
			has_tool_events = true;
			tool_events[ev] = true;
			continue;
		}
		/* Separate events with commas and open the group if necessary. */
		if (no_group) {
			if (group_events) {
				ret = strbuf_addch(events, '{');
				RETURN_IF_NON_ZERO(ret);
			}

			no_group = false;
		} else {
			ret = strbuf_addch(events, ',');
			RETURN_IF_NON_ZERO(ret);
		}
		/*
		 * Encode the ID as an event string. Add a qualifier for
		 * metric_id that is the original name except with characters
		 * that parse-events can't parse replaced. For example,
		 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
		 */
		sep = strchr(id, '@');
		if (sep != NULL) {
			/* pmu@terms@ form: emit "pmu/terms,metric-id=". */
			ret = strbuf_add(events, id, sep - id);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addch(events, '/');
			RETURN_IF_NON_ZERO(ret);
			rsep = strrchr(sep, '@');
			ret = strbuf_add(events, sep + 1, rsep - sep - 1);
			RETURN_IF_NON_ZERO(ret);
			ret = strbuf_addstr(events, ",metric-id=");
			RETURN_IF_NON_ZERO(ret);
			sep = rsep;
		} else {
			/* Plain or "name:mod" form: emit "name/metric-id=". */
			sep = strchr(id, ':');
			if (sep != NULL) {
				ret = strbuf_add(events, id, sep - id);
				RETURN_IF_NON_ZERO(ret);
			} else {
				ret = strbuf_addstr(events, id);
				RETURN_IF_NON_ZERO(ret);
			}
			ret = strbuf_addstr(events, "/metric-id=");
			RETURN_IF_NON_ZERO(ret);
		}
		ret = encode_metric_id(events, id);
		RETURN_IF_NON_ZERO(ret);
		ret = strbuf_addstr(events, "/");
		RETURN_IF_NON_ZERO(ret);

		/* Re-append any modifier that followed the original ':' or '@'. */
		if (sep != NULL) {
			ret = strbuf_addstr(events, sep + 1);
			RETURN_IF_NON_ZERO(ret);
		}
		if (modifier) {
			ret = strbuf_addstr(events, modifier);
			RETURN_IF_NON_ZERO(ret);
		}
	}
	if (!no_group && group_events) {
		ret = strbuf_addf(events, "}:W");
		RETURN_IF_NON_ZERO(ret);
	}
	if (has_tool_events) {
		int i;

		perf_tool_event__for_each_event(i) {
			if (tool_events[i]) {
				if (!no_group) {
					ret = strbuf_addch(events, ',');
					RETURN_IF_NON_ZERO(ret);
				}
				no_group = false;
				ret = strbuf_addstr(events, perf_tool_event__to_str(i));
				RETURN_IF_NON_ZERO(ret);
			}
		}
	}

	return ret;
#undef RETURN_IF_NON_ZERO
}
773 
/*
 * Number of instances to create for a metric whose expression contains a '?'
 * runtime parameter. Weak so an architecture can provide its own value.
 */
int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
{
	return 1;
}
778 
/*
 * A singly linked list on the stack of the names of metrics being
 * processed. Used to identify recursion.
 */
struct visited_metric {
	/* Name of the metric at this level of the resolution. */
	const char *name;
	/* The metric that referenced this one; NULL at the root. */
	const struct visited_metric *parent;
};
787 
/*
 * Arguments threaded through metricgroup__add_metric_sys_event_iter; most
 * mirror the parameters of add_metric().
 */
struct metricgroup_add_iter_data {
	struct list_head *metric_list;
	/* PMU name to match, or "all". */
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	/* NOTE(review): presumably out-params read by the caller after the
	 * iteration completes — the consumer is outside this chunk; confirm. */
	int *ret;
	bool *has_match;
	bool metric_no_group;
	bool metric_no_threshold;
	const char *user_requested_cpu_list;
	bool system_wide;
	struct metric *root_metric;
	const struct visited_metric *visited;
	const struct pmu_metrics_table *table;
};
803 
/*
 * Forward declarations: add_metric (via __add_metric) and resolve_metric
 * call each other while recursively resolving metric references.
 */
static bool metricgroup__find_metric(const char *pmu,
				     const char *metric,
				     const struct pmu_metrics_table *table,
				     struct pmu_metric *pm);

static int add_metric(struct list_head *metric_list,
		      const struct pmu_metric *pm,
		      const char *modifier,
		      bool metric_no_group,
		      bool metric_no_threshold,
		      const char *user_requested_cpu_list,
		      bool system_wide,
		      struct metric *root_metric,
		      const struct visited_metric *visited,
		      const struct pmu_metrics_table *table);
819 
820 /**
821  * resolve_metric - Locate metrics within the root metric and recursively add
822  *                    references to them.
823  * @metric_list: The list the metric is added to.
824  * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
825  * @modifier: if non-null event modifiers like "u".
826  * @metric_no_group: Should events written to events be grouped "{}" or
827  *                   global. Grouping is the default but due to multiplexing the
828  *                   user may override.
829  * @user_requested_cpu_list: Command line specified CPUs to record on.
830  * @system_wide: Are events for all processes recorded.
831  * @root_metric: Metrics may reference other metrics to form a tree. In this
832  *               case the root_metric holds all the IDs and a list of referenced
833  *               metrics. When adding a root this argument is NULL.
834  * @visited: A singly linked list of metric names being added that is used to
835  *           detect recursion.
836  * @table: The table that is searched for metrics, most commonly the table for the
837  *       architecture perf is running upon.
838  */
839 static int resolve_metric(struct list_head *metric_list,
840 			  const char *pmu,
841 			  const char *modifier,
842 			  bool metric_no_group,
843 			  bool metric_no_threshold,
844 			  const char *user_requested_cpu_list,
845 			  bool system_wide,
846 			  struct metric *root_metric,
847 			  const struct visited_metric *visited,
848 			  const struct pmu_metrics_table *table)
849 {
850 	struct hashmap_entry *cur;
851 	size_t bkt;
852 	struct to_resolve {
853 		/* The metric to resolve. */
854 		struct pmu_metric pm;
855 		/*
856 		 * The key in the IDs map, this may differ from in case,
857 		 * etc. from pm->metric_name.
858 		 */
859 		const char *key;
860 	} *pending = NULL;
861 	int i, ret = 0, pending_cnt = 0;
862 
863 	/*
864 	 * Iterate all the parsed IDs and if there's a matching metric and it to
865 	 * the pending array.
866 	 */
867 	hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
868 		struct pmu_metric pm;
869 
870 		if (metricgroup__find_metric(pmu, cur->pkey, table, &pm)) {
871 			pending = realloc(pending,
872 					(pending_cnt + 1) * sizeof(struct to_resolve));
873 			if (!pending)
874 				return -ENOMEM;
875 
876 			memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
877 			pending[pending_cnt].key = cur->pkey;
878 			pending_cnt++;
879 		}
880 	}
881 
882 	/* Remove the metric IDs from the context. */
883 	for (i = 0; i < pending_cnt; i++)
884 		expr__del_id(root_metric->pctx, pending[i].key);
885 
886 	/*
887 	 * Recursively add all the metrics, IDs are added to the root metric's
888 	 * context.
889 	 */
890 	for (i = 0; i < pending_cnt; i++) {
891 		ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
892 				 metric_no_threshold, user_requested_cpu_list, system_wide,
893 				 root_metric, visited, table);
894 		if (ret)
895 			break;
896 	}
897 
898 	free(pending);
899 	return ret;
900 }
901 
902 /**
903  * __add_metric - Add a metric to metric_list.
904  * @metric_list: The list the metric is added to.
905  * @pm: The pmu_metric containing the metric to be added.
906  * @modifier: if non-null event modifiers like "u".
907  * @metric_no_group: Should events written to events be grouped "{}" or
908  *                   global. Grouping is the default but due to multiplexing the
909  *                   user may override.
910  * @metric_no_threshold: Should threshold expressions be ignored?
911  * @runtime: A special argument for the parser only known at runtime.
912  * @user_requested_cpu_list: Command line specified CPUs to record on.
913  * @system_wide: Are events for all processes recorded.
914  * @root_metric: Metrics may reference other metrics to form a tree. In this
915  *               case the root_metric holds all the IDs and a list of referenced
916  *               metrics. When adding a root this argument is NULL.
917  * @visited: A singly linked list of metric names being added that is used to
918  *           detect recursion.
919  * @table: The table that is searched for metrics, most commonly the table for the
920  *       architecture perf is running upon.
921  */
922 static int __add_metric(struct list_head *metric_list,
923 			const struct pmu_metric *pm,
924 			const char *modifier,
925 			bool metric_no_group,
926 			bool metric_no_threshold,
927 			int runtime,
928 			const char *user_requested_cpu_list,
929 			bool system_wide,
930 			struct metric *root_metric,
931 			const struct visited_metric *visited,
932 			const struct pmu_metrics_table *table)
933 {
934 	const struct visited_metric *vm;
935 	int ret;
936 	bool is_root = !root_metric;
937 	const char *expr;
938 	struct visited_metric visited_node = {
939 		.name = pm->metric_name,
940 		.parent = visited,
941 	};
942 
943 	for (vm = visited; vm; vm = vm->parent) {
944 		if (!strcmp(pm->metric_name, vm->name)) {
945 			pr_err("failed: recursion detected for %s\n", pm->metric_name);
946 			return -1;
947 		}
948 	}
949 
950 	if (is_root) {
951 		/*
952 		 * This metric is the root of a tree and may reference other
953 		 * metrics that are added recursively.
954 		 */
955 		root_metric = metric__new(pm, modifier, metric_no_group, runtime,
956 					  user_requested_cpu_list, system_wide);
957 		if (!root_metric)
958 			return -ENOMEM;
959 
960 	} else {
961 		int cnt = 0;
962 
963 		/*
964 		 * This metric was referenced in a metric higher in the
965 		 * tree. Check if the same metric is already resolved in the
966 		 * metric_refs list.
967 		 */
968 		if (root_metric->metric_refs) {
969 			for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
970 				if (!strcmp(pm->metric_name,
971 					    root_metric->metric_refs[cnt].metric_name))
972 					return 0;
973 			}
974 		}
975 
976 		/* Create reference. Need space for the entry and the terminator. */
977 		root_metric->metric_refs = realloc(root_metric->metric_refs,
978 						(cnt + 2) * sizeof(struct metric_ref));
979 		if (!root_metric->metric_refs)
980 			return -ENOMEM;
981 
982 		/*
983 		 * Intentionally passing just const char pointers,
984 		 * from 'pe' object, so they never go away. We don't
985 		 * need to change them, so there's no need to create
986 		 * our own copy.
987 		 */
988 		root_metric->metric_refs[cnt].metric_name = pm->metric_name;
989 		root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;
990 
991 		/* Null terminate array. */
992 		root_metric->metric_refs[cnt+1].metric_name = NULL;
993 		root_metric->metric_refs[cnt+1].metric_expr = NULL;
994 	}
995 
996 	/*
997 	 * For both the parent and referenced metrics, we parse
998 	 * all the metric's IDs and add it to the root context.
999 	 */
1000 	ret = 0;
1001 	expr = pm->metric_expr;
1002 	if (is_root && pm->metric_threshold) {
1003 		/*
1004 		 * Threshold expressions are built off the actual metric. Switch
1005 		 * to use that in case of additional necessary events. Change
1006 		 * the visited node name to avoid this being flagged as
1007 		 * recursion. If the threshold events are disabled, just use the
1008 		 * metric's name as a reference. This allows metric threshold
1009 		 * computation if there are sufficient events.
1010 		 */
1011 		assert(strstr(pm->metric_threshold, pm->metric_name));
1012 		expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
1013 		visited_node.name = "__threshold__";
1014 	}
1015 	if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
1016 		/* Broken metric. */
1017 		ret = -EINVAL;
1018 	}
1019 	if (!ret) {
1020 		/* Resolve referenced metrics. */
1021 		const char *pmu = pm->pmu ?: "cpu";
1022 
1023 		ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
1024 				     metric_no_threshold, user_requested_cpu_list,
1025 				     system_wide, root_metric, &visited_node,
1026 				     table);
1027 	}
1028 	if (ret) {
1029 		if (is_root)
1030 			metric__free(root_metric);
1031 
1032 	} else if (is_root)
1033 		list_add(&root_metric->nd, metric_list);
1034 
1035 	return ret;
1036 }
1037 
/* Search criteria and result slot for metricgroup__find_metric_callback. */
struct metricgroup__find_metric_data {
	/* PMU name to filter on, or "all". */
	const char *pmu;
	/* Metric name being looked up. */
	const char *metric;
	/* Out: filled with the matching table entry when found. */
	struct pmu_metric *pm;
};
1043 
1044 static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
1045 					     const struct pmu_metrics_table *table  __maybe_unused,
1046 					     void *vdata)
1047 {
1048 	struct metricgroup__find_metric_data *data = vdata;
1049 	const char *pm_pmu = pm->pmu ?: "cpu";
1050 
1051 	if (strcmp(data->pmu, "all") && strcmp(pm_pmu, data->pmu))
1052 		return 0;
1053 
1054 	if (!match_metric(pm->metric_name, data->metric))
1055 		return 0;
1056 
1057 	memcpy(data->pm, pm, sizeof(*pm));
1058 	return 1;
1059 }
1060 
1061 static bool metricgroup__find_metric(const char *pmu,
1062 				     const char *metric,
1063 				     const struct pmu_metrics_table *table,
1064 				     struct pmu_metric *pm)
1065 {
1066 	struct metricgroup__find_metric_data data = {
1067 		.pmu = pmu,
1068 		.metric = metric,
1069 		.pm = pm,
1070 	};
1071 
1072 	return pmu_metrics_table__for_each_metric(table, metricgroup__find_metric_callback, &data)
1073 		? true : false;
1074 }
1075 
1076 static int add_metric(struct list_head *metric_list,
1077 		      const struct pmu_metric *pm,
1078 		      const char *modifier,
1079 		      bool metric_no_group,
1080 		      bool metric_no_threshold,
1081 		      const char *user_requested_cpu_list,
1082 		      bool system_wide,
1083 		      struct metric *root_metric,
1084 		      const struct visited_metric *visited,
1085 		      const struct pmu_metrics_table *table)
1086 {
1087 	int ret = 0;
1088 
1089 	pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);
1090 
1091 	if (!strstr(pm->metric_expr, "?")) {
1092 		ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1093 				   metric_no_threshold, 0, user_requested_cpu_list,
1094 				   system_wide, root_metric, visited, table);
1095 	} else {
1096 		int j, count;
1097 
1098 		count = arch_get_runtimeparam(pm);
1099 
1100 		/* This loop is added to create multiple
1101 		 * events depend on count value and add
1102 		 * those events to metric_list.
1103 		 */
1104 
1105 		for (j = 0; j < count && !ret; j++)
1106 			ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1107 					   metric_no_threshold, j, user_requested_cpu_list,
1108 					   system_wide, root_metric, visited, table);
1109 	}
1110 
1111 	return ret;
1112 }
1113 
1114 static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
1115 					const struct pmu_metrics_table *table __maybe_unused,
1116 					void *data)
1117 {
1118 	struct metricgroup_add_iter_data *d = data;
1119 	int ret;
1120 
1121 	if (!match_pm_metric(pm, d->pmu, d->metric_name))
1122 		return 0;
1123 
1124 	ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
1125 			 d->metric_no_threshold, d->user_requested_cpu_list,
1126 			 d->system_wide, d->root_metric, d->visited, d->table);
1127 	if (ret)
1128 		goto out;
1129 
1130 	*(d->has_match) = true;
1131 
1132 out:
1133 	*(d->ret) = ret;
1134 	return ret;
1135 }
1136 
1137 /**
1138  * metric_list_cmp - list_sort comparator that sorts metrics with more events to
1139  *                   the front. tool events are excluded from the count.
1140  */
1141 static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
1142 			   const struct list_head *r)
1143 {
1144 	const struct metric *left = container_of(l, struct metric, nd);
1145 	const struct metric *right = container_of(r, struct metric, nd);
1146 	struct expr_id_data *data;
1147 	int i, left_count, right_count;
1148 
1149 	left_count = hashmap__size(left->pctx->ids);
1150 	perf_tool_event__for_each_event(i) {
1151 		if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data))
1152 			left_count--;
1153 	}
1154 
1155 	right_count = hashmap__size(right->pctx->ids);
1156 	perf_tool_event__for_each_event(i) {
1157 		if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data))
1158 			right_count--;
1159 	}
1160 
1161 	return right_count - left_count;
1162 }
1163 
1164 /**
1165  * default_metricgroup_cmp - Implements complex key for the Default metricgroup
1166  *			     that first sorts by default_metricgroup_name, then
1167  *			     metric_name.
1168  */
1169 static int default_metricgroup_cmp(void *priv __maybe_unused,
1170 				   const struct list_head *l,
1171 				   const struct list_head *r)
1172 {
1173 	const struct metric *left = container_of(l, struct metric, nd);
1174 	const struct metric *right = container_of(r, struct metric, nd);
1175 	int diff = strcmp(right->default_metricgroup_name, left->default_metricgroup_name);
1176 
1177 	if (diff)
1178 		return diff;
1179 
1180 	return strcmp(right->metric_name, left->metric_name);
1181 }
1182 
/**
 * struct metricgroup__add_metric_data - state passed to
 *	metricgroup__add_metric_callback() while walking a metrics table.
 * @list: List that matching metrics are added to.
 * @pmu: PMU name to restrict matches to, or "all" for any PMU.
 * @metric_name: Metric or metric group name being searched for.
 * @modifier: If non-null, event modifiers like "u".
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @metric_no_group: Should events be grouped "{}" or global?
 * @metric_no_threshold: Should threshold expressions be ignored?
 * @system_wide: Are events for all processes recorded?
 * @has_match: Set true once at least one metric has matched.
 */
struct metricgroup__add_metric_data {
	struct list_head *list;
	const char *pmu;
	const char *metric_name;
	const char *modifier;
	const char *user_requested_cpu_list;
	bool metric_no_group;
	bool metric_no_threshold;
	bool system_wide;
	bool has_match;
};
1194 
1195 static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
1196 					    const struct pmu_metrics_table *table,
1197 					    void *vdata)
1198 {
1199 	struct metricgroup__add_metric_data *data = vdata;
1200 	int ret = 0;
1201 
1202 	if (pm->metric_expr && match_pm_metric(pm, data->pmu, data->metric_name)) {
1203 		bool metric_no_group = data->metric_no_group ||
1204 			match_metric(pm->metricgroup_no_group, data->metric_name);
1205 
1206 		data->has_match = true;
1207 		ret = add_metric(data->list, pm, data->modifier, metric_no_group,
1208 				 data->metric_no_threshold, data->user_requested_cpu_list,
1209 				 data->system_wide, /*root_metric=*/NULL,
1210 				 /*visited_metrics=*/NULL, table);
1211 	}
1212 	return ret;
1213 }
1214 
1215 /**
1216  * metricgroup__add_metric - Find and add a metric, or a metric group.
1217  * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
1218  * @metric_name: The name of the metric or metric group. For example, "IPC"
1219  *               could be the name of a metric and "TopDownL1" the name of a
1220  *               metric group.
1221  * @modifier: if non-null event modifiers like "u".
1222  * @metric_no_group: Should events written to events be grouped "{}" or
1223  *                   global. Grouping is the default but due to multiplexing the
1224  *                   user may override.
1225  * @user_requested_cpu_list: Command line specified CPUs to record on.
1226  * @system_wide: Are events for all processes recorded.
1227  * @metric_list: The list that the metric or metric group are added to.
1228  * @table: The table that is searched for metrics, most commonly the table for the
1229  *       architecture perf is running upon.
1230  */
1231 static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
1232 				   bool metric_no_group, bool metric_no_threshold,
1233 				   const char *user_requested_cpu_list,
1234 				   bool system_wide,
1235 				   struct list_head *metric_list,
1236 				   const struct pmu_metrics_table *table)
1237 {
1238 	LIST_HEAD(list);
1239 	int ret;
1240 	bool has_match = false;
1241 
1242 	{
1243 		struct metricgroup__add_metric_data data = {
1244 			.list = &list,
1245 			.pmu = pmu,
1246 			.metric_name = metric_name,
1247 			.modifier = modifier,
1248 			.metric_no_group = metric_no_group,
1249 			.metric_no_threshold = metric_no_threshold,
1250 			.user_requested_cpu_list = user_requested_cpu_list,
1251 			.system_wide = system_wide,
1252 			.has_match = false,
1253 		};
1254 		/*
1255 		 * Iterate over all metrics seeing if metric matches either the
1256 		 * name or group. When it does add the metric to the list.
1257 		 */
1258 		ret = pmu_metrics_table__for_each_metric(table, metricgroup__add_metric_callback,
1259 						       &data);
1260 		if (ret)
1261 			goto out;
1262 
1263 		has_match = data.has_match;
1264 	}
1265 	{
1266 		struct metricgroup_iter_data data = {
1267 			.fn = metricgroup__add_metric_sys_event_iter,
1268 			.data = (void *) &(struct metricgroup_add_iter_data) {
1269 				.metric_list = &list,
1270 				.pmu = pmu,
1271 				.metric_name = metric_name,
1272 				.modifier = modifier,
1273 				.metric_no_group = metric_no_group,
1274 				.user_requested_cpu_list = user_requested_cpu_list,
1275 				.system_wide = system_wide,
1276 				.has_match = &has_match,
1277 				.ret = &ret,
1278 				.table = table,
1279 			},
1280 		};
1281 
1282 		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
1283 	}
1284 	/* End of pmu events. */
1285 	if (!has_match)
1286 		ret = -EINVAL;
1287 
1288 out:
1289 	/*
1290 	 * add to metric_list so that they can be released
1291 	 * even if it's failed
1292 	 */
1293 	list_splice(&list, metric_list);
1294 	return ret;
1295 }
1296 
1297 /**
1298  * metricgroup__add_metric_list - Find and add metrics, or metric groups,
1299  *                                specified in a list.
1300  * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
1301  * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
1302  *        would match the IPC and CPI metrics, and TopDownL1 would match all
1303  *        the metrics in the TopDownL1 group.
1304  * @metric_no_group: Should events written to events be grouped "{}" or
1305  *                   global. Grouping is the default but due to multiplexing the
1306  *                   user may override.
1307  * @user_requested_cpu_list: Command line specified CPUs to record on.
1308  * @system_wide: Are events for all processes recorded.
1309  * @metric_list: The list that metrics are added to.
1310  * @table: The table that is searched for metrics, most commonly the table for the
1311  *       architecture perf is running upon.
1312  */
1313 static int metricgroup__add_metric_list(const char *pmu, const char *list,
1314 					bool metric_no_group,
1315 					bool metric_no_threshold,
1316 					const char *user_requested_cpu_list,
1317 					bool system_wide, struct list_head *metric_list,
1318 					const struct pmu_metrics_table *table)
1319 {
1320 	char *list_itr, *list_copy, *metric_name, *modifier;
1321 	int ret, count = 0;
1322 
1323 	list_copy = strdup(list);
1324 	if (!list_copy)
1325 		return -ENOMEM;
1326 	list_itr = list_copy;
1327 
1328 	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
1329 		modifier = strchr(metric_name, ':');
1330 		if (modifier)
1331 			*modifier++ = '\0';
1332 
1333 		ret = metricgroup__add_metric(pmu, metric_name, modifier,
1334 					      metric_no_group, metric_no_threshold,
1335 					      user_requested_cpu_list,
1336 					      system_wide, metric_list, table);
1337 		if (ret == -EINVAL)
1338 			pr_err("Cannot find metric or group `%s'\n", metric_name);
1339 
1340 		if (ret)
1341 			break;
1342 
1343 		count++;
1344 	}
1345 	free(list_copy);
1346 
1347 	if (!ret) {
1348 		/*
1349 		 * Warn about nmi_watchdog if any parsed metrics had the
1350 		 * NO_NMI_WATCHDOG constraint.
1351 		 */
1352 		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
1353 		/* No metrics. */
1354 		if (count == 0)
1355 			return -EINVAL;
1356 	}
1357 	return ret;
1358 }
1359 
1360 static void metricgroup__free_metrics(struct list_head *metric_list)
1361 {
1362 	struct metric *m, *tmp;
1363 
1364 	list_for_each_entry_safe (m, tmp, metric_list, nd) {
1365 		list_del_init(&m->nd);
1366 		metric__free(m);
1367 	}
1368 }
1369 
1370 /**
1371  * find_tool_events - Search for the pressence of tool events in metric_list.
1372  * @metric_list: List to take metrics from.
1373  * @tool_events: Array of false values, indices corresponding to tool events set
1374  *               to true if tool event is found.
1375  */
1376 static void find_tool_events(const struct list_head *metric_list,
1377 			     bool tool_events[PERF_TOOL_MAX])
1378 {
1379 	struct metric *m;
1380 
1381 	list_for_each_entry(m, metric_list, nd) {
1382 		int i;
1383 
1384 		perf_tool_event__for_each_event(i) {
1385 			struct expr_id_data *data;
1386 
1387 			if (!tool_events[i] &&
1388 			    !expr__get_id(m->pctx, perf_tool_event__to_str(i), &data))
1389 				tool_events[i] = true;
1390 		}
1391 	}
1392 }
1393 
1394 /**
1395  * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
1396  *                           metric IDs, as the IDs are held in a set,
1397  *                           duplicates will be removed.
1398  * @metric_list: List to take metrics from.
1399  * @combined: Out argument for result.
1400  */
1401 static int build_combined_expr_ctx(const struct list_head *metric_list,
1402 				   struct expr_parse_ctx **combined)
1403 {
1404 	struct hashmap_entry *cur;
1405 	size_t bkt;
1406 	struct metric *m;
1407 	char *dup;
1408 	int ret;
1409 
1410 	*combined = expr__ctx_new();
1411 	if (!*combined)
1412 		return -ENOMEM;
1413 
1414 	list_for_each_entry(m, metric_list, nd) {
1415 		if (!m->group_events && !m->modifier) {
1416 			hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
1417 				dup = strdup(cur->pkey);
1418 				if (!dup) {
1419 					ret = -ENOMEM;
1420 					goto err_out;
1421 				}
1422 				ret = expr__add_id(*combined, dup);
1423 				if (ret)
1424 					goto err_out;
1425 			}
1426 		}
1427 	}
1428 	return 0;
1429 err_out:
1430 	expr__ctx_free(*combined);
1431 	*combined = NULL;
1432 	return ret;
1433 }
1434 
1435 /**
1436  * parse_ids - Build the event string for the ids and parse them creating an
1437  *             evlist. The encoded metric_ids are decoded.
1438  * @metric_no_merge: is metric sharing explicitly disabled.
1439  * @fake_pmu: used when testing metrics not supported by the current CPU.
1440  * @ids: the event identifiers parsed from a metric.
1441  * @modifier: any modifiers added to the events.
1442  * @group_events: should events be placed in a weak group.
1443  * @tool_events: entries set true if the tool event of index could be present in
1444  *               the overall list of metrics.
1445  * @out_evlist: the created list of events.
1446  */
1447 static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
1448 		     struct expr_parse_ctx *ids, const char *modifier,
1449 		     bool group_events, const bool tool_events[PERF_TOOL_MAX],
1450 		     struct evlist **out_evlist)
1451 {
1452 	struct parse_events_error parse_error;
1453 	struct evlist *parsed_evlist;
1454 	struct strbuf events = STRBUF_INIT;
1455 	int ret;
1456 
1457 	*out_evlist = NULL;
1458 	if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
1459 		bool added_event = false;
1460 		int i;
1461 		/*
1462 		 * We may fail to share events between metrics because a tool
1463 		 * event isn't present in one metric. For example, a ratio of
1464 		 * cache misses doesn't need duration_time but the same events
1465 		 * may be used for a misses per second. Events without sharing
1466 		 * implies multiplexing, that is best avoided, so place
1467 		 * all tool events in every group.
1468 		 *
1469 		 * Also, there may be no ids/events in the expression parsing
1470 		 * context because of constant evaluation, e.g.:
1471 		 *    event1 if #smt_on else 0
1472 		 * Add a tool event to avoid a parse error on an empty string.
1473 		 */
1474 		perf_tool_event__for_each_event(i) {
1475 			if (tool_events[i]) {
1476 				char *tmp = strdup(perf_tool_event__to_str(i));
1477 
1478 				if (!tmp)
1479 					return -ENOMEM;
1480 				ids__insert(ids->ids, tmp);
1481 				added_event = true;
1482 			}
1483 		}
1484 		if (!added_event && hashmap__size(ids->ids) == 0) {
1485 			char *tmp = strdup("duration_time");
1486 
1487 			if (!tmp)
1488 				return -ENOMEM;
1489 			ids__insert(ids->ids, tmp);
1490 		}
1491 	}
1492 	ret = metricgroup__build_event_string(&events, ids, modifier,
1493 					      group_events);
1494 	if (ret)
1495 		return ret;
1496 
1497 	parsed_evlist = evlist__new();
1498 	if (!parsed_evlist) {
1499 		ret = -ENOMEM;
1500 		goto err_out;
1501 	}
1502 	pr_debug("Parsing metric events '%s'\n", events.buf);
1503 	parse_events_error__init(&parse_error);
1504 	ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
1505 			     &parse_error, fake_pmu, /*warn_if_reordered=*/false,
1506 			     /*fake_tp=*/false);
1507 	if (ret) {
1508 		parse_events_error__print(&parse_error, events.buf);
1509 		goto err_out;
1510 	}
1511 	ret = decode_all_metric_ids(parsed_evlist, modifier);
1512 	if (ret)
1513 		goto err_out;
1514 
1515 	*out_evlist = parsed_evlist;
1516 	parsed_evlist = NULL;
1517 err_out:
1518 	parse_events_error__exit(&parse_error);
1519 	evlist__delete(parsed_evlist);
1520 	strbuf_release(&events);
1521 	return ret;
1522 }
1523 
1524 static int parse_groups(struct evlist *perf_evlist,
1525 			const char *pmu, const char *str,
1526 			bool metric_no_group,
1527 			bool metric_no_merge,
1528 			bool metric_no_threshold,
1529 			const char *user_requested_cpu_list,
1530 			bool system_wide,
1531 			struct perf_pmu *fake_pmu,
1532 			struct rblist *metric_events_list,
1533 			const struct pmu_metrics_table *table)
1534 {
1535 	struct evlist *combined_evlist = NULL;
1536 	LIST_HEAD(metric_list);
1537 	struct metric *m;
1538 	bool tool_events[PERF_TOOL_MAX] = {false};
1539 	bool is_default = !strcmp(str, "Default");
1540 	int ret;
1541 
1542 	if (metric_events_list->nr_entries == 0)
1543 		metricgroup__rblist_init(metric_events_list);
1544 	ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
1545 					   user_requested_cpu_list,
1546 					   system_wide, &metric_list, table);
1547 	if (ret)
1548 		goto out;
1549 
1550 	/* Sort metrics from largest to smallest. */
1551 	list_sort(NULL, &metric_list, metric_list_cmp);
1552 
1553 	if (!metric_no_merge) {
1554 		struct expr_parse_ctx *combined = NULL;
1555 
1556 		find_tool_events(&metric_list, tool_events);
1557 
1558 		ret = build_combined_expr_ctx(&metric_list, &combined);
1559 
1560 		if (!ret && combined && hashmap__size(combined->ids)) {
1561 			ret = parse_ids(metric_no_merge, fake_pmu, combined,
1562 					/*modifier=*/NULL,
1563 					/*group_events=*/false,
1564 					tool_events,
1565 					&combined_evlist);
1566 		}
1567 		if (combined)
1568 			expr__ctx_free(combined);
1569 
1570 		if (ret)
1571 			goto out;
1572 	}
1573 
1574 	if (is_default)
1575 		list_sort(NULL, &metric_list, default_metricgroup_cmp);
1576 
1577 	list_for_each_entry(m, &metric_list, nd) {
1578 		struct metric_event *me;
1579 		struct evsel **metric_events;
1580 		struct evlist *metric_evlist = NULL;
1581 		struct metric *n;
1582 		struct metric_expr *expr;
1583 
1584 		if (combined_evlist && !m->group_events) {
1585 			metric_evlist = combined_evlist;
1586 		} else if (!metric_no_merge) {
1587 			/*
1588 			 * See if the IDs for this metric are a subset of an
1589 			 * earlier metric.
1590 			 */
1591 			list_for_each_entry(n, &metric_list, nd) {
1592 				if (m == n)
1593 					break;
1594 
1595 				if (n->evlist == NULL)
1596 					continue;
1597 
1598 				if ((!m->modifier && n->modifier) ||
1599 				    (m->modifier && !n->modifier) ||
1600 				    (m->modifier && n->modifier &&
1601 					    strcmp(m->modifier, n->modifier)))
1602 					continue;
1603 
1604 				if ((!m->pmu && n->pmu) ||
1605 				    (m->pmu && !n->pmu) ||
1606 				    (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
1607 					continue;
1608 
1609 				if (expr__subset_of_ids(n->pctx, m->pctx)) {
1610 					pr_debug("Events in '%s' fully contained within '%s'\n",
1611 						 m->metric_name, n->metric_name);
1612 					metric_evlist = n->evlist;
1613 					break;
1614 				}
1615 
1616 			}
1617 		}
1618 		if (!metric_evlist) {
1619 			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
1620 					m->group_events, tool_events, &m->evlist);
1621 			if (ret)
1622 				goto out;
1623 
1624 			metric_evlist = m->evlist;
1625 		}
1626 		ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
1627 					  metric_evlist, &metric_events);
1628 		if (ret) {
1629 			pr_err("Cannot resolve IDs for %s: %s\n",
1630 				m->metric_name, m->metric_expr);
1631 			goto out;
1632 		}
1633 
1634 		me = metricgroup__lookup(metric_events_list, metric_events[0], true);
1635 
1636 		expr = malloc(sizeof(struct metric_expr));
1637 		if (!expr) {
1638 			ret = -ENOMEM;
1639 			free(metric_events);
1640 			goto out;
1641 		}
1642 
1643 		expr->metric_refs = m->metric_refs;
1644 		m->metric_refs = NULL;
1645 		expr->metric_expr = m->metric_expr;
1646 		if (m->modifier) {
1647 			char *tmp;
1648 
1649 			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
1650 				expr->metric_name = NULL;
1651 			else
1652 				expr->metric_name = tmp;
1653 		} else
1654 			expr->metric_name = strdup(m->metric_name);
1655 
1656 		if (!expr->metric_name) {
1657 			ret = -ENOMEM;
1658 			free(metric_events);
1659 			goto out;
1660 		}
1661 		expr->metric_threshold = m->metric_threshold;
1662 		expr->metric_unit = m->metric_unit;
1663 		expr->metric_events = metric_events;
1664 		expr->runtime = m->pctx->sctx.runtime;
1665 		expr->default_metricgroup_name = m->default_metricgroup_name;
1666 		me->is_default = is_default;
1667 		list_add(&expr->nd, &me->head);
1668 	}
1669 
1670 
1671 	if (combined_evlist) {
1672 		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
1673 		evlist__delete(combined_evlist);
1674 	}
1675 
1676 	list_for_each_entry(m, &metric_list, nd) {
1677 		if (m->evlist)
1678 			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
1679 	}
1680 
1681 out:
1682 	metricgroup__free_metrics(&metric_list);
1683 	return ret;
1684 }
1685 
1686 int metricgroup__parse_groups(struct evlist *perf_evlist,
1687 			      const char *pmu,
1688 			      const char *str,
1689 			      bool metric_no_group,
1690 			      bool metric_no_merge,
1691 			      bool metric_no_threshold,
1692 			      const char *user_requested_cpu_list,
1693 			      bool system_wide,
1694 			      bool hardware_aware_grouping,
1695 			      struct rblist *metric_events)
1696 {
1697 	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1698 
1699 	if (!table)
1700 		return -EINVAL;
1701 	if (hardware_aware_grouping)
1702 		pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n");
1703 
1704 	return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
1705 			    metric_no_threshold, user_requested_cpu_list, system_wide,
1706 			    /*fake_pmu=*/NULL, metric_events, table);
1707 }
1708 
1709 int metricgroup__parse_groups_test(struct evlist *evlist,
1710 				   const struct pmu_metrics_table *table,
1711 				   const char *str,
1712 				   struct rblist *metric_events)
1713 {
1714 	return parse_groups(evlist, "all", str,
1715 			    /*metric_no_group=*/false,
1716 			    /*metric_no_merge=*/false,
1717 			    /*metric_no_threshold=*/false,
1718 			    /*user_requested_cpu_list=*/NULL,
1719 			    /*system_wide=*/false,
1720 			    &perf_pmu__fake, metric_events, table);
1721 }
1722 
/**
 * struct metricgroup__has_metric_data - state passed to
 *	metricgroup__has_metric_callback().
 * @pmu: PMU name to match, or "all" for any PMU.
 * @metric: Metric or metric group name being looked for.
 */
struct metricgroup__has_metric_data {
	const char *pmu;
	const char *metric;
};
1727 static int metricgroup__has_metric_callback(const struct pmu_metric *pm,
1728 					    const struct pmu_metrics_table *table __maybe_unused,
1729 					    void *vdata)
1730 {
1731 	struct metricgroup__has_metric_data *data = vdata;
1732 
1733 	return match_pm_metric(pm, data->pmu, data->metric) ? 1 : 0;
1734 }
1735 
1736 bool metricgroup__has_metric(const char *pmu, const char *metric)
1737 {
1738 	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1739 	struct metricgroup__has_metric_data data = {
1740 		.pmu = pmu,
1741 		.metric = metric,
1742 	};
1743 
1744 	if (!table)
1745 		return false;
1746 
1747 	return pmu_metrics_table__for_each_metric(table, metricgroup__has_metric_callback, &data)
1748 		? true : false;
1749 }
1750 
1751 static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
1752 					    const struct pmu_metrics_table *table __maybe_unused,
1753 					    void *data)
1754 {
1755 	unsigned int *max_level = data;
1756 	unsigned int level;
1757 	const char *p = strstr(pm->metric_group ?: "", "TopdownL");
1758 
1759 	if (!p || p[8] == '\0')
1760 		return 0;
1761 
1762 	level = p[8] - '0';
1763 	if (level > *max_level)
1764 		*max_level = level;
1765 
1766 	return 0;
1767 }
1768 
1769 unsigned int metricgroups__topdown_max_level(void)
1770 {
1771 	unsigned int max_level = 0;
1772 	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1773 
1774 	if (!table)
1775 		return false;
1776 
1777 	pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
1778 					  &max_level);
1779 	return max_level;
1780 }
1781 
1782 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1783 				    struct rblist *new_metric_events,
1784 				    struct rblist *old_metric_events)
1785 {
1786 	unsigned int i;
1787 
1788 	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1789 		struct rb_node *nd;
1790 		struct metric_event *old_me, *new_me;
1791 		struct metric_expr *old_expr, *new_expr;
1792 		struct evsel *evsel;
1793 		size_t alloc_size;
1794 		int idx, nr;
1795 
1796 		nd = rblist__entry(old_metric_events, i);
1797 		old_me = container_of(nd, struct metric_event, nd);
1798 
1799 		evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1800 		if (!evsel)
1801 			return -EINVAL;
1802 		new_me = metricgroup__lookup(new_metric_events, evsel, true);
1803 		if (!new_me)
1804 			return -ENOMEM;
1805 
1806 		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1807 			 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1808 
1809 		list_for_each_entry(old_expr, &old_me->head, nd) {
1810 			new_expr = malloc(sizeof(*new_expr));
1811 			if (!new_expr)
1812 				return -ENOMEM;
1813 
1814 			new_expr->metric_expr = old_expr->metric_expr;
1815 			new_expr->metric_threshold = old_expr->metric_threshold;
1816 			new_expr->metric_name = strdup(old_expr->metric_name);
1817 			if (!new_expr->metric_name)
1818 				return -ENOMEM;
1819 
1820 			new_expr->metric_unit = old_expr->metric_unit;
1821 			new_expr->runtime = old_expr->runtime;
1822 
1823 			if (old_expr->metric_refs) {
1824 				/* calculate number of metric_events */
1825 				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1826 					continue;
1827 				alloc_size = sizeof(*new_expr->metric_refs);
1828 				new_expr->metric_refs = calloc(nr + 1, alloc_size);
1829 				if (!new_expr->metric_refs) {
1830 					free(new_expr);
1831 					return -ENOMEM;
1832 				}
1833 
1834 				memcpy(new_expr->metric_refs, old_expr->metric_refs,
1835 				       nr * alloc_size);
1836 			} else {
1837 				new_expr->metric_refs = NULL;
1838 			}
1839 
1840 			/* calculate number of metric_events */
1841 			for (nr = 0; old_expr->metric_events[nr]; nr++)
1842 				continue;
1843 			alloc_size = sizeof(*new_expr->metric_events);
1844 			new_expr->metric_events = calloc(nr + 1, alloc_size);
1845 			if (!new_expr->metric_events) {
1846 				zfree(&new_expr->metric_refs);
1847 				free(new_expr);
1848 				return -ENOMEM;
1849 			}
1850 
1851 			/* copy evsel in the same position */
1852 			for (idx = 0; idx < nr; idx++) {
1853 				evsel = old_expr->metric_events[idx];
1854 				evsel = evlist__find_evsel(evlist, evsel->core.idx);
1855 				if (evsel == NULL) {
1856 					zfree(&new_expr->metric_events);
1857 					zfree(&new_expr->metric_refs);
1858 					free(new_expr);
1859 					return -EINVAL;
1860 				}
1861 				new_expr->metric_events[idx] = evsel;
1862 			}
1863 
1864 			list_add(&new_expr->nd, &new_me->head);
1865 		}
1866 	}
1867 	return 0;
1868 }
1869