xref: /linux/tools/perf/util/metricgroup.c (revision 6beeaf48db6c548fcfc2ad32739d33af2fef3a5b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2017, Intel Corporation.
4  */
5 
6 /* Manage metrics and groups of metrics from JSON files */
7 
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "expr.h"
15 #include "rblist.h"
16 #include <string.h>
17 #include <errno.h>
18 #include "strlist.h"
19 #include <assert.h>
20 #include <linux/ctype.h>
21 #include <linux/string.h>
22 #include <linux/zalloc.h>
23 #include <subcmd/parse-options.h>
24 #include <api/fs/fs.h>
25 #include "util.h"
26 #include <asm/bug.h>
27 #include "cgroup.h"
28 
29 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
30 					 struct evsel *evsel,
31 					 bool create)
32 {
33 	struct rb_node *nd;
34 	struct metric_event me = {
35 		.evsel = evsel
36 	};
37 
38 	if (!metric_events)
39 		return NULL;
40 
41 	nd = rblist__find(metric_events, &me);
42 	if (nd)
43 		return container_of(nd, struct metric_event, nd);
44 	if (create) {
45 		rblist__add_node(metric_events, &me);
46 		nd = rblist__find(metric_events, &me);
47 		if (nd)
48 			return container_of(nd, struct metric_event, nd);
49 	}
50 	return NULL;
51 }
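
/*
 * Illustrative usage sketch (not part of the upstream code): a caller that
 * wants the metric expressions attached to an evsel would typically look the
 * node up and walk the expression list, e.g.:
 *
 *	struct metric_event *me = metricgroup__lookup(metric_events, evsel, false);
 *	struct metric_expr *expr;
 *
 *	if (me)
 *		list_for_each_entry(expr, &me->head, nd)
 *			pr_debug("%s = %s\n", expr->metric_name, expr->metric_expr);
 */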
52 
53 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
54 {
55 	struct metric_event *a = container_of(rb_node,
56 					      struct metric_event,
57 					      nd);
58 	const struct metric_event *b = entry;
59 
60 	if (a->evsel == b->evsel)
61 		return 0;
62 	if ((char *)a->evsel < (char *)b->evsel)
63 		return -1;
64 	return +1;
65 }
66 
67 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
68 					const void *entry)
69 {
70 	struct metric_event *me = malloc(sizeof(struct metric_event));
71 
72 	if (!me)
73 		return NULL;
74 	memcpy(me, entry, sizeof(struct metric_event));
75 	me->evsel = ((struct metric_event *)entry)->evsel;
76 	INIT_LIST_HEAD(&me->head);
77 	return &me->nd;
78 }
79 
80 static void metric_event_delete(struct rblist *rblist __maybe_unused,
81 				struct rb_node *rb_node)
82 {
83 	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
84 	struct metric_expr *expr, *tmp;
85 
86 	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
87 		free(expr->metric_refs);
88 		free(expr->metric_events);
89 		free(expr);
90 	}
91 
92 	free(me);
93 }
94 
95 static void metricgroup__rblist_init(struct rblist *metric_events)
96 {
97 	rblist__init(metric_events);
98 	metric_events->node_cmp = metric_event_cmp;
99 	metric_events->node_new = metric_event_new;
100 	metric_events->node_delete = metric_event_delete;
101 }
102 
103 void metricgroup__rblist_exit(struct rblist *metric_events)
104 {
105 	rblist__exit(metric_events);
106 }
107 
108 /*
109  * A node in the list of referenced metrics. metric_expr
110  * is held as a convenience to avoid a search through the
111  * metric list.
112  */
113 struct metric_ref_node {
114 	const char *metric_name;
115 	const char *metric_expr;
116 	struct list_head list;
117 };
118 
119 struct metric {
120 	struct list_head nd;
121 	struct expr_parse_ctx pctx;
122 	const char *metric_name;
123 	const char *metric_expr;
124 	const char *metric_unit;
125 	struct list_head metric_refs;
126 	int metric_refs_cnt;
127 	int runtime;
128 	bool has_constraint;
129 };
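
/*
 * For illustration (a hypothetical JSON metric, not taken from a real event
 * table): a metric such as
 *
 *	"MetricName": "IPC",
 *	"MetricExpr": "instructions / cycles",
 *
 * parses into a struct metric whose pctx.ids hashmap holds the keys
 * "instructions" and "cycles", whose metric_expr points at the expression
 * string, and whose metric_refs list stays empty unless the expression
 * references other metrics by name.
 */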
130 
131 #define RECURSION_ID_MAX 1000
132 
133 struct expr_ids {
134 	struct expr_id	id[RECURSION_ID_MAX];
135 	int		cnt;
136 };
137 
138 static struct expr_id *expr_ids__alloc(struct expr_ids *ids)
139 {
140 	if (ids->cnt >= RECURSION_ID_MAX)
141 		return NULL;
142 	return &ids->id[ids->cnt++];
143 }
144 
145 static void expr_ids__exit(struct expr_ids *ids)
146 {
147 	int i;
148 
149 	for (i = 0; i < ids->cnt; i++)
150 		free(ids->id[i].id);
151 }
152 
153 static bool contains_event(struct evsel **metric_events, int num_events,
154 			const char *event_name)
155 {
156 	int i;
157 
158 	for (i = 0; i < num_events; i++) {
159 		if (!strcmp(metric_events[i]->name, event_name))
160 			return true;
161 	}
162 	return false;
163 }
164 
165 static bool evsel_same_pmu_or_none(struct evsel *ev1, struct evsel *ev2)
166 {
167 	if (!ev1->pmu_name || !ev2->pmu_name)
168 		return true;
169 
170 	return !strcmp(ev1->pmu_name, ev2->pmu_name);
171 }
172 
173 /**
174  * Find a group of events in perf_evlist that correspond to those from a parsed
175  * metric expression. Note: as find_evsel_group is called in the same order in
176  * which perf_evlist was constructed, metric_no_merge doesn't need to test for
177  * underfilling a group.
178  * @perf_evlist: a list of events something like: {metric1 leader, metric1
179  * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
180  * metric2 sibling}:W,duration_time
181  * @pctx: the parse context for the metric expression.
182  * @metric_no_merge: don't attempt to share events for the metric with other
183  * metrics.
184  * @has_constraint: is there a constraint on the group of events? If so, the
185  * events won't be grouped.
186  * @metric_events: out argument, NULL-terminated array of evsels associated
187  * with the metric.
188  * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
189  * @return the first metric event or NULL on failure.
190  */
191 static struct evsel *find_evsel_group(struct evlist *perf_evlist,
192 				      struct expr_parse_ctx *pctx,
193 				      bool metric_no_merge,
194 				      bool has_constraint,
195 				      struct evsel **metric_events,
196 				      unsigned long *evlist_used)
197 {
198 	struct evsel *ev, *current_leader = NULL;
199 	struct expr_id_data *val_ptr;
200 	int i = 0, matched_events = 0, events_to_match;
201 	const int idnum = (int)hashmap__size(&pctx->ids);
202 
203 	/*
204 	 * duration_time is always grouped separately. When events are grouped
205 	 * (i.e. has_constraint is false), ignore it in the matching loop and
206 	 * add it to metric_events at the end.
207 	 */
208 	if (!has_constraint &&
209 	    hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
210 		events_to_match = idnum - 1;
211 	else
212 		events_to_match = idnum;
213 
214 	evlist__for_each_entry (perf_evlist, ev) {
215 		/*
216 		 * Events with a constraint aren't grouped and match the first
217 		 * events available.
218 		 */
219 		if (has_constraint && ev->weak_group)
220 			continue;
221 		/* Ignore event if already used and merging is disabled. */
222 		if (metric_no_merge && test_bit(ev->core.idx, evlist_used))
223 			continue;
224 		if (!has_constraint && !evsel__has_leader(ev, current_leader)) {
225 			/*
226 			 * Start of a new group, discard the whole match and
227 			 * start again.
228 			 */
229 			matched_events = 0;
230 			memset(metric_events, 0,
231 				sizeof(struct evsel *) * idnum);
232 			current_leader = evsel__leader(ev);
233 		}
234 		/*
235 		 * Check for duplicate events with the same name. For example,
236 		 * uncore_imc/cas_count_read/ will turn into 6 events per socket
237 		 * on skylakex. Only the first such event is placed in
238 		 * metric_events. If events aren't grouped then this also
239 		 * ensures that the same event in different sibling groups
240 		 * aren't both added to metric_events.
241 		 */
242 		if (contains_event(metric_events, matched_events, ev->name))
243 			continue;
244 		/* Does this event belong to the parse context? */
245 		if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr))
246 			metric_events[matched_events++] = ev;
247 
248 		if (matched_events == events_to_match)
249 			break;
250 	}
251 
252 	if (events_to_match != idnum) {
253 		/* Add the first duration_time. */
254 		evlist__for_each_entry(perf_evlist, ev) {
255 			if (!strcmp(ev->name, "duration_time")) {
256 				metric_events[matched_events++] = ev;
257 				break;
258 			}
259 		}
260 	}
261 
262 	if (matched_events != idnum) {
263 		/* Not a whole match */
264 		return NULL;
265 	}
266 
267 	metric_events[idnum] = NULL;
268 
269 	for (i = 0; i < idnum; i++) {
270 		ev = metric_events[i];
271 		/* Don't free the used events. */
272 		set_bit(ev->core.idx, evlist_used);
273 		/*
274 		 * The metric leader points to the identically named event in
275 		 * metric_events.
276 		 */
277 		ev->metric_leader = ev;
278 		/*
279 		 * Mark events with identical names in the same group (or
280 		 * globally) as being in use, as uncore events may be duplicated
281 		 * for each PMU. Set the metric leader of such events to be the
282 		 * event that appears in metric_events.
283 		 */
284 		evlist__for_each_entry_continue(perf_evlist, ev) {
285 			/*
286 			 * If events are grouped then the search can terminate
287 		 * when the group is left.
288 			 */
289 			if (!has_constraint &&
290 			    ev->core.leader != metric_events[i]->core.leader &&
291 			    evsel_same_pmu_or_none(evsel__leader(ev), evsel__leader(metric_events[i])))
292 				break;
293 			if (!strcmp(metric_events[i]->name, ev->name)) {
294 				set_bit(ev->core.idx, evlist_used);
295 				ev->metric_leader = metric_events[i];
296 			}
297 		}
298 	}
299 
300 	return metric_events[0];
301 }
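
/*
 * A worked example of the matching above (metric and event names are
 * illustrative only): given a perf_evlist of
 *
 *	{inst_retired.any,cpu_clk_unhalted.thread}:W,duration_time
 *
 * and a pctx containing the ids "inst_retired.any", "cpu_clk_unhalted.thread"
 * and "duration_time", the two grouped events are matched first
 * (events_to_match == idnum - 1), duration_time is appended afterwards, and
 * every matched evsel gets its metric_leader pointed at the identically named
 * evsel stored in metric_events.
 */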
302 
303 static int metricgroup__setup_events(struct list_head *groups,
304 				     bool metric_no_merge,
305 				     struct evlist *perf_evlist,
306 				     struct rblist *metric_events_list)
307 {
308 	struct metric_event *me;
309 	struct metric_expr *expr;
310 	int i = 0;
311 	int ret = 0;
312 	struct metric *m;
313 	struct evsel *evsel, *tmp;
314 	unsigned long *evlist_used;
315 
316 	evlist_used = bitmap_zalloc(perf_evlist->core.nr_entries);
317 	if (!evlist_used)
318 		return -ENOMEM;
319 
320 	list_for_each_entry (m, groups, nd) {
321 		struct evsel **metric_events;
322 		struct metric_ref *metric_refs = NULL;
323 
324 		metric_events = calloc(sizeof(void *),
325 				hashmap__size(&m->pctx.ids) + 1);
326 		if (!metric_events) {
327 			ret = -ENOMEM;
328 			break;
329 		}
330 		evsel = find_evsel_group(perf_evlist, &m->pctx,
331 					 metric_no_merge,
332 					 m->has_constraint, metric_events,
333 					 evlist_used);
334 		if (!evsel) {
335 			pr_debug("Cannot resolve %s: %s\n",
336 					m->metric_name, m->metric_expr);
337 			free(metric_events);
338 			continue;
339 		}
340 		for (i = 0; metric_events[i]; i++)
341 			metric_events[i]->collect_stat = true;
342 		me = metricgroup__lookup(metric_events_list, evsel, true);
343 		if (!me) {
344 			ret = -ENOMEM;
345 			free(metric_events);
346 			break;
347 		}
348 		expr = malloc(sizeof(struct metric_expr));
349 		if (!expr) {
350 			ret = -ENOMEM;
351 			free(metric_events);
352 			break;
353 		}
354 
355 		/*
356 		 * Collect and store the referenced nested metric expressions
357 		 * for metric processing.
358 		 */
359 		if (m->metric_refs_cnt) {
360 			struct metric_ref_node *ref;
361 
362 			metric_refs = zalloc(sizeof(struct metric_ref) * (m->metric_refs_cnt + 1));
363 			if (!metric_refs) {
364 				ret = -ENOMEM;
365 				free(metric_events);
366 				free(expr);
367 				break;
368 			}
369 
370 			i = 0;
371 			list_for_each_entry(ref, &m->metric_refs, list) {
372 				/*
373 				 * Intentionally passing just const char pointers,
374 				 * originally from 'struct pmu_event' object.
375 				 * We don't need to change them, so there's no
376 				 * need to create our own copy.
377 				 */
378 				metric_refs[i].metric_name = ref->metric_name;
379 				metric_refs[i].metric_expr = ref->metric_expr;
380 				i++;
381 			}
382 		}
383 
384 		expr->metric_refs = metric_refs;
385 		expr->metric_expr = m->metric_expr;
386 		expr->metric_name = m->metric_name;
387 		expr->metric_unit = m->metric_unit;
388 		expr->metric_events = metric_events;
389 		expr->runtime = m->runtime;
390 		list_add(&expr->nd, &me->head);
391 	}
392 
393 	evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
394 		if (!test_bit(evsel->core.idx, evlist_used)) {
395 			evlist__remove(perf_evlist, evsel);
396 			evsel__delete(evsel);
397 		}
398 	}
399 	bitmap_free(evlist_used);
400 
401 	return ret;
402 }
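
/*
 * In short, metricgroup__setup_events() glues the parsed metrics to the
 * evsels that __parse_events() created from the generated event string: for
 * each metric it finds the matching evsel group, marks those evsels with
 * collect_stat, attaches a struct metric_expr to the metric_event node of the
 * first matched evsel, and finally removes any evsels that no metric ended up
 * using.
 */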
403 
404 static bool match_metric(const char *n, const char *list)
405 {
406 	int len;
407 	char *m;
408 
409 	if (!list)
410 		return false;
411 	if (!strcmp(list, "all"))
412 		return true;
413 	if (!n)
414 		return !strcasecmp(list, "No_group");
415 	len = strlen(list);
416 	m = strcasestr(n, list);
417 	if (!m)
418 		return false;
419 	if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
420 	    (m[len] == 0 || m[len] == ';'))
421 		return true;
422 	return false;
423 }
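
/*
 * Matching is case-insensitive and operates on ';'-separated tokens, for
 * example (illustrative values):
 *
 *	match_metric("TopdownL1;TLB", "tlb")     -> true
 *	match_metric("TopdownL1;TLB", "Topdown") -> false (not a whole token)
 *	match_metric(NULL, "No_group")           -> true
 *	match_metric("anything", "all")          -> true
 */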
424 
425 static bool match_pe_metric(struct pmu_event *pe, const char *metric)
426 {
427 	return match_metric(pe->metric_group, metric) ||
428 	       match_metric(pe->metric_name, metric);
429 }
430 
431 struct mep {
432 	struct rb_node nd;
433 	const char *name;
434 	struct strlist *metrics;
435 };
436 
437 static int mep_cmp(struct rb_node *rb_node, const void *entry)
438 {
439 	struct mep *a = container_of(rb_node, struct mep, nd);
440 	struct mep *b = (struct mep *)entry;
441 
442 	return strcmp(a->name, b->name);
443 }
444 
445 static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
446 					const void *entry)
447 {
448 	struct mep *me = malloc(sizeof(struct mep));
449 
450 	if (!me)
451 		return NULL;
452 	memcpy(me, entry, sizeof(struct mep));
453 	me->name = strdup(me->name);
454 	if (!me->name)
455 		goto out_me;
456 	me->metrics = strlist__new(NULL, NULL);
457 	if (!me->metrics)
458 		goto out_name;
459 	return &me->nd;
460 out_name:
461 	zfree(&me->name);
462 out_me:
463 	free(me);
464 	return NULL;
465 }
466 
467 static struct mep *mep_lookup(struct rblist *groups, const char *name)
468 {
469 	struct rb_node *nd;
470 	struct mep me = {
471 		.name = name
472 	};
473 	nd = rblist__find(groups, &me);
474 	if (nd)
475 		return container_of(nd, struct mep, nd);
476 	rblist__add_node(groups, &me);
477 	nd = rblist__find(groups, &me);
478 	if (nd)
479 		return container_of(nd, struct mep, nd);
480 	return NULL;
481 }
482 
483 static void mep_delete(struct rblist *rl __maybe_unused,
484 		       struct rb_node *nd)
485 {
486 	struct mep *me = container_of(nd, struct mep, nd);
487 
488 	strlist__delete(me->metrics);
489 	zfree(&me->name);
490 	free(me);
491 }
492 
493 static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
494 {
495 	struct str_node *sn;
496 	int n = 0;
497 
498 	strlist__for_each_entry (sn, metrics) {
499 		if (raw)
500 			printf("%s%s", n > 0 ? " " : "", sn->s);
501 		else
502 			printf("  %s\n", sn->s);
503 		n++;
504 	}
505 	if (raw)
506 		putchar('\n');
507 }
508 
509 static int metricgroup__print_pmu_event(struct pmu_event *pe,
510 					bool metricgroups, char *filter,
511 					bool raw, bool details,
512 					struct rblist *groups,
513 					struct strlist *metriclist)
514 {
515 	const char *g;
516 	char *omg, *mg;
517 
518 	g = pe->metric_group;
519 	if (!g && pe->metric_name) {
520 		if (pe->name)
521 			return 0;
522 		g = "No_group";
523 	}
524 
525 	if (!g)
526 		return 0;
527 
528 	mg = strdup(g);
529 
530 	if (!mg)
531 		return -ENOMEM;
532 	omg = mg;
533 	while ((g = strsep(&mg, ";")) != NULL) {
534 		struct mep *me;
535 		char *s;
536 
537 		g = skip_spaces(g);
538 		if (*g == 0)
539 			g = "No_group";
540 		if (filter && !strstr(g, filter))
541 			continue;
542 		if (raw)
543 			s = (char *)pe->metric_name;
544 		else {
545 			if (asprintf(&s, "%s\n%*s%s]",
546 				     pe->metric_name, 8, "[", pe->desc) < 0)
547 				return -1;
548 			if (details) {
549 				if (asprintf(&s, "%s\n%*s%s]",
550 					     s, 8, "[", pe->metric_expr) < 0)
551 					return -1;
552 			}
553 		}
554 
555 		if (!s)
556 			continue;
557 
558 		if (!metricgroups) {
559 			strlist__add(metriclist, s);
560 		} else {
561 			me = mep_lookup(groups, g);
562 			if (!me)
563 				continue;
564 			strlist__add(me->metrics, s);
565 		}
566 
567 		if (!raw)
568 			free(s);
569 	}
570 	free(omg);
571 
572 	return 0;
573 }
574 
575 struct metricgroup_print_sys_idata {
576 	struct strlist *metriclist;
577 	char *filter;
578 	struct rblist *groups;
579 	bool metricgroups;
580 	bool raw;
581 	bool details;
582 };
583 
584 typedef int (*metricgroup_sys_event_iter_fn)(struct pmu_event *pe, void *);
585 
586 struct metricgroup_iter_data {
587 	metricgroup_sys_event_iter_fn fn;
588 	void *data;
589 };
590 
591 static int metricgroup__sys_event_iter(struct pmu_event *pe, void *data)
592 {
593 	struct metricgroup_iter_data *d = data;
594 	struct perf_pmu *pmu = NULL;
595 
596 	if (!pe->metric_expr || !pe->compat)
597 		return 0;
598 
599 	while ((pmu = perf_pmu__scan(pmu))) {
600 
601 		if (!pmu->id || strcmp(pmu->id, pe->compat))
602 			continue;
603 
604 		return d->fn(pe, d->data);
605 	}
606 
607 	return 0;
608 }
609 
610 static int metricgroup__print_sys_event_iter(struct pmu_event *pe, void *data)
611 {
612 	struct metricgroup_print_sys_idata *d = data;
613 
614 	return metricgroup__print_pmu_event(pe, d->metricgroups, d->filter, d->raw,
615 				     d->details, d->groups, d->metriclist);
616 }
617 
618 void metricgroup__print(bool metrics, bool metricgroups, char *filter,
619 			bool raw, bool details)
620 {
621 	struct pmu_events_map *map = pmu_events_map__find();
622 	struct pmu_event *pe;
623 	int i;
624 	struct rblist groups;
625 	struct rb_node *node, *next;
626 	struct strlist *metriclist = NULL;
627 
628 	if (!metricgroups) {
629 		metriclist = strlist__new(NULL, NULL);
630 		if (!metriclist)
631 			return;
632 	}
633 
634 	rblist__init(&groups);
635 	groups.node_new = mep_new;
636 	groups.node_cmp = mep_cmp;
637 	groups.node_delete = mep_delete;
638 	for (i = 0; map; i++) {
639 		pe = &map->table[i];
640 
641 		if (!pe->name && !pe->metric_group && !pe->metric_name)
642 			break;
643 		if (!pe->metric_expr)
644 			continue;
645 		if (metricgroup__print_pmu_event(pe, metricgroups, filter,
646 						 raw, details, &groups,
647 						 metriclist) < 0)
648 			return;
649 	}
650 
651 	{
652 		struct metricgroup_iter_data data = {
653 			.fn = metricgroup__print_sys_event_iter,
654 			.data = (void *) &(struct metricgroup_print_sys_idata){
655 				.metriclist = metriclist,
656 				.metricgroups = metricgroups,
657 				.filter = filter,
658 				.raw = raw,
659 				.details = details,
660 				.groups = &groups,
661 			},
662 		};
663 
664 		pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
665 	}
666 
667 	if (!filter || !rblist__empty(&groups)) {
668 		if (metricgroups && !raw)
669 			printf("\nMetric Groups:\n\n");
670 		else if (metrics && !raw)
671 			printf("\nMetrics:\n\n");
672 	}
673 
674 	for (node = rb_first_cached(&groups.entries); node; node = next) {
675 		struct mep *me = container_of(node, struct mep, nd);
676 
677 		if (metricgroups)
678 			printf("%s%s%s", me->name, metrics && !raw ? ":" : "", raw ? " " : "\n");
679 		if (metrics)
680 			metricgroup__print_strlist(me->metrics, raw);
681 		next = rb_next(node);
682 		rblist__remove_node(&groups, node);
683 	}
684 	if (!metricgroups)
685 		metricgroup__print_strlist(metriclist, raw);
686 	strlist__delete(metriclist);
687 }
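
/*
 * Roughly, the non-raw metric group listing produced above looks like the
 * following sketch (group and metric names are made up):
 *
 *	Metric Groups:
 *
 *	SomeGroup:
 *	  Some_Metric
 *	       [Description of the metric taken from the JSON file]
 *
 * With 'raw' set, each group is instead emitted on a single space-separated
 * line, and a non-NULL 'filter' keeps only the groups whose name contains the
 * filter string.
 */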
688 
689 static void metricgroup__add_metric_weak_group(struct strbuf *events,
690 					       struct expr_parse_ctx *ctx)
691 {
692 	struct hashmap_entry *cur;
693 	size_t bkt;
694 	bool no_group = true, has_duration = false;
695 
696 	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
697 		pr_debug("found event %s\n", (const char *)cur->key);
698 		/*
699 		 * Duration time maps to a software event and can make
700 		 * groups not count. Always use it outside a
701 		 * group.
702 		 */
703 		if (!strcmp(cur->key, "duration_time")) {
704 			has_duration = true;
705 			continue;
706 		}
707 		strbuf_addf(events, "%s%s",
708 			no_group ? "{" : ",",
709 			(const char *)cur->key);
710 		no_group = false;
711 	}
712 	if (!no_group) {
713 		strbuf_addf(events, "}:W");
714 		if (has_duration)
715 			strbuf_addf(events, ",duration_time");
716 	} else if (has_duration)
717 		strbuf_addf(events, "duration_time");
718 }
719 
720 static void metricgroup__add_metric_non_group(struct strbuf *events,
721 					      struct expr_parse_ctx *ctx)
722 {
723 	struct hashmap_entry *cur;
724 	size_t bkt;
725 	bool first = true;
726 
727 	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
728 		if (!first)
729 			strbuf_addf(events, ",");
730 		strbuf_addf(events, "%s", (const char *)cur->key);
731 		first = false;
732 	}
733 }
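
/*
 * The two helpers above build the event string that is later handed to
 * __parse_events(). Assuming a metric that uses three hypothetical events
 * plus duration_time, the weak-group form produces
 *
 *	{event_a,event_b,event_c}:W,duration_time
 *
 * while the constrained (non-group) form emits the same ids comma-separated,
 * without the {}:W grouping.
 */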
734 
735 static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
736 {
737 	static bool violate_nmi_constraint;
738 
739 	if (!foot) {
740 		pr_warning("Splitting metric group %s into standalone metrics.\n", name);
741 		violate_nmi_constraint = true;
742 		return;
743 	}
744 
745 	if (!violate_nmi_constraint)
746 		return;
747 
748 	pr_warning("Try disabling the NMI watchdog to comply with the NO_NMI_WATCHDOG metric constraint:\n"
749 		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
750 		   "    perf stat ...\n"
751 		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
752 }
753 
754 static bool metricgroup__has_constraint(struct pmu_event *pe)
755 {
756 	if (!pe->metric_constraint)
757 		return false;
758 
759 	if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
760 	    sysctl__nmi_watchdog_enabled()) {
761 		metricgroup___watchdog_constraint_hint(pe->metric_name, false);
762 		return true;
763 	}
764 
765 	return false;
766 }
767 
768 int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
769 {
770 	return 1;
771 }
772 
773 struct metricgroup_add_iter_data {
774 	struct list_head *metric_list;
775 	const char *metric;
776 	struct expr_ids *ids;
777 	int *ret;
778 	bool *has_match;
779 	bool metric_no_group;
780 };
781 
782 static int __add_metric(struct list_head *metric_list,
783 			struct pmu_event *pe,
784 			bool metric_no_group,
785 			int runtime,
786 			struct metric **mp,
787 			struct expr_id *parent,
788 			struct expr_ids *ids)
789 {
790 	struct metric_ref_node *ref;
791 	struct metric *m;
792 
793 	if (*mp == NULL) {
794 		/*
795 		 * We got here for the parent group;
796 		 * allocate it and put it on the list.
797 		 */
798 		m = zalloc(sizeof(*m));
799 		if (!m)
800 			return -ENOMEM;
801 
802 		expr__ctx_init(&m->pctx);
803 		m->metric_name = pe->metric_name;
804 		m->metric_expr = pe->metric_expr;
805 		m->metric_unit = pe->unit;
806 		m->runtime = runtime;
807 		m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
808 		INIT_LIST_HEAD(&m->metric_refs);
809 		m->metric_refs_cnt = 0;
810 
811 		parent = expr_ids__alloc(ids);
812 		if (!parent) {
813 			free(m);
814 			return -EINVAL;
815 		}
816 
817 		parent->id = strdup(pe->metric_name);
818 		if (!parent->id) {
819 			free(m);
820 			return -ENOMEM;
821 		}
822 		*mp = m;
823 	} else {
824 		/*
825 		 * We got here for a referenced metric, via the
826 		 * recursive metricgroup__add_metric call; add
827 		 * it to the parent group.
828 		 */
829 		m = *mp;
830 
831 		ref = malloc(sizeof(*ref));
832 		if (!ref)
833 			return -ENOMEM;
834 
835 		/*
836 		 * Intentionally passing just const char pointers,
837 		 * from 'pe' object, so they never go away. We don't
838 		 * need to change them, so there's no need to create
839 		 * our own copy.
840 		 */
841 		ref->metric_name = pe->metric_name;
842 		ref->metric_expr = pe->metric_expr;
843 
844 		list_add(&ref->list, &m->metric_refs);
845 		m->metric_refs_cnt++;
846 	}
847 
848 	/* Force all IDs found in the metric to have us as their parent ID. */
849 	WARN_ON_ONCE(!parent);
850 	m->pctx.parent = parent;
851 
852 	/*
853 	 * For both the parent and referenced metrics, we parse
854 	 * all the metric's IDs and add them to the parent context.
855 	 */
856 	if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
857 		if (m->metric_refs_cnt == 0) {
858 			expr__ctx_clear(&m->pctx);
859 			free(m);
860 			*mp = NULL;
861 		}
862 		return -EINVAL;
863 	}
864 
865 	/*
866 	 * We add the new group only in the 'parent' call,
867 	 * so bail out for the referenced metric case.
868 	 */
869 	if (m->metric_refs_cnt)
870 		return 0;
871 
872 	if (list_empty(metric_list))
873 		list_add(&m->nd, metric_list);
874 	else {
875 		struct list_head *pos;
876 
877 		/* Place the largest groups at the front. */
878 		list_for_each_prev(pos, metric_list) {
879 			struct metric *old = list_entry(pos, struct metric, nd);
880 
881 			if (hashmap__size(&m->pctx.ids) <=
882 			    hashmap__size(&old->pctx.ids))
883 				break;
884 		}
885 		list_add(&m->nd, pos);
886 	}
887 
888 	return 0;
889 }
890 
891 #define map_for_each_event(__pe, __idx, __map)					\
892 	if (__map)								\
893 		for (__idx = 0, __pe = &__map->table[__idx];			\
894 		     __pe->name || __pe->metric_group || __pe->metric_name;	\
895 		     __pe = &__map->table[++__idx])
896 
897 #define map_for_each_metric(__pe, __idx, __map, __metric)		\
898 	map_for_each_event(__pe, __idx, __map)				\
899 		if (__pe->metric_expr &&				\
900 		    (match_metric(__pe->metric_group, __metric) ||	\
901 		     match_metric(__pe->metric_name, __metric)))
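
/*
 * A pmu_events_map table is terminated by an entry whose name, metric_group
 * and metric_name are all NULL, which is what the loop condition above
 * checks. A typical use, as in metricgroup__find_metric() below, looks like:
 *
 *	struct pmu_event *pe;
 *	int i;
 *
 *	map_for_each_event(pe, i, map) {
 *		if (match_metric(pe->metric_name, metric))
 *			return pe;
 *	}
 */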
902 
903 struct pmu_event *metricgroup__find_metric(const char *metric,
904 					   struct pmu_events_map *map)
905 {
906 	struct pmu_event *pe;
907 	int i;
908 
909 	map_for_each_event(pe, i, map) {
910 		if (match_metric(pe->metric_name, metric))
911 			return pe;
912 	}
913 
914 	return NULL;
915 }
916 
917 static int recursion_check(struct metric *m, const char *id, struct expr_id **parent,
918 			   struct expr_ids *ids)
919 {
920 	struct expr_id_data *data;
921 	struct expr_id *p;
922 	int ret;
923 
924 	/*
925 	 * We get the parent referenced by the 'id' argument and
926 	 * traverse all the parent object IDs to check whether we
927 	 * already processed 'id'. If we did, it's recursion and
928 	 * we fail.
929 	 */
930 	ret = expr__get_id(&m->pctx, id, &data);
931 	if (ret)
932 		return ret;
933 
934 	p = expr_id_data__parent(data);
935 
936 	while (p->parent) {
937 		if (!strcmp(p->id, id)) {
938 			pr_err("failed: recursion detected for %s\n", id);
939 			return -1;
940 		}
941 		p = p->parent;
942 	}
943 
944 	/*
945 	 * If we are over the limit of static entries, the metric
946 	 * is too deeply nested to process, so fail as well.
947 	 */
948 	p = expr_ids__alloc(ids);
949 	if (!p) {
950 		pr_err("failed: too many nested metrics\n");
951 		return -EINVAL;
952 	}
953 
954 	p->id     = strdup(id);
955 	p->parent = expr_id_data__parent(data);
956 	*parent   = p;
957 
958 	return p->id ? 0 : -ENOMEM;
959 }
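
/*
 * As an illustration (hypothetical metric names): if metric M1 is defined as
 * "M2 + 1" and M2 is in turn defined as "M1 / 2", resolving M1 keeps pulling
 * the referenced metrics into the parent context; once an id that already
 * appears in its own parent chain is met again, the check above fails with
 * "recursion detected" instead of looping forever.
 */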
960 
961 static int add_metric(struct list_head *metric_list,
962 		      struct pmu_event *pe,
963 		      bool metric_no_group,
964 		      struct metric **mp,
965 		      struct expr_id *parent,
966 		      struct expr_ids *ids);
967 
968 static int __resolve_metric(struct metric *m,
969 			    bool metric_no_group,
970 			    struct list_head *metric_list,
971 			    struct pmu_events_map *map,
972 			    struct expr_ids *ids)
973 {
974 	struct hashmap_entry *cur;
975 	size_t bkt;
976 	bool all;
977 	int ret;
978 
979 	/*
980 	 * Iterate over all the parsed IDs and, if an ID is itself
981 	 * a metric, add it to the context.
982 	 */
983 	do {
984 		all = true;
985 		hashmap__for_each_entry((&m->pctx.ids), cur, bkt) {
986 			struct expr_id *parent;
987 			struct pmu_event *pe;
988 
989 			pe = metricgroup__find_metric(cur->key, map);
990 			if (!pe)
991 				continue;
992 
993 			ret = recursion_check(m, cur->key, &parent, ids);
994 			if (ret)
995 				return ret;
996 
997 			all = false;
998 			/* The metric key itself needs to go out... */
999 			expr__del_id(&m->pctx, cur->key);
1000 
1001 			/* ... and it gets resolved to the parent context. */
1002 			ret = add_metric(metric_list, pe, metric_no_group, &m, parent, ids);
1003 			if (ret)
1004 				return ret;
1005 
1006 			/*
1007 			 * We added a new metric to the hashmap, so we need
1008 			 * to break the iteration and start over.
1009 			 */
1010 			break;
1011 		}
1012 	} while (!all);
1013 
1014 	return 0;
1015 }
1016 
1017 static int resolve_metric(bool metric_no_group,
1018 			  struct list_head *metric_list,
1019 			  struct pmu_events_map *map,
1020 			  struct expr_ids *ids)
1021 {
1022 	struct metric *m;
1023 	int err;
1024 
1025 	list_for_each_entry(m, metric_list, nd) {
1026 		err = __resolve_metric(m, metric_no_group, metric_list, map, ids);
1027 		if (err)
1028 			return err;
1029 	}
1030 	return 0;
1031 }
1032 
1033 static int add_metric(struct list_head *metric_list,
1034 		      struct pmu_event *pe,
1035 		      bool metric_no_group,
1036 		      struct metric **m,
1037 		      struct expr_id *parent,
1038 		      struct expr_ids *ids)
1039 {
1040 	struct metric *orig = *m;
1041 	int ret = 0;
1042 
1043 	pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
1044 
1045 	if (!strstr(pe->metric_expr, "?")) {
1046 		ret = __add_metric(metric_list, pe, metric_no_group, 1, m, parent, ids);
1047 	} else {
1048 		int j, count;
1049 
1050 		count = arch_get_runtimeparam(pe);
1051 
1052 		/*
1053 		 * Create one metric for each runtime parameter value
1054 		 * (0 .. count - 1) and add them all to metric_list.
1055 		 */
1056 
1057 		for (j = 0; j < count && !ret; j++, *m = orig)
1058 			ret = __add_metric(metric_list, pe, metric_no_group, j, m, parent, ids);
1059 	}
1060 
1061 	return ret;
1062 }
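
/*
 * Sketch of the '?' handling above (the event name is made up): a metric
 * expression such as "hypothetical_event_?" for which
 * arch_get_runtimeparam() returns 4 is expanded into four copies of the
 * metric, created with runtime values 0, 1, 2 and 3; the expression code
 * substitutes the runtime value for '?' in each copy.
 */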
1063 
1064 static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
1065 						  void *data)
1066 {
1067 	struct metricgroup_add_iter_data *d = data;
1068 	struct metric *m = NULL;
1069 	int ret;
1070 
1071 	if (!match_pe_metric(pe, d->metric))
1072 		return 0;
1073 
1074 	ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
1075 	if (ret)
1076 		goto out;
1077 
1078 	ret = resolve_metric(d->metric_no_group,
1079 				     d->metric_list, NULL, d->ids);
1080 	if (ret)
1081 		goto out;
1082 
1083 	*(d->has_match) = true;
1084 
1085 out:
1086 	*(d->ret) = ret;
1087 	return ret;
1088 }
1089 
1090 static int metricgroup__add_metric(const char *metric, bool metric_no_group,
1091 				   struct strbuf *events,
1092 				   struct list_head *metric_list,
1093 				   struct pmu_events_map *map)
1094 {
1095 	struct expr_ids ids = { .cnt = 0, };
1096 	struct pmu_event *pe;
1097 	struct metric *m;
1098 	LIST_HEAD(list);
1099 	int i, ret;
1100 	bool has_match = false;
1101 
1102 	map_for_each_metric(pe, i, map, metric) {
1103 		has_match = true;
1104 		m = NULL;
1105 
1106 		ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
1107 		if (ret)
1108 			goto out;
1109 
1110 		/*
1111 		 * Process any possible referenced metrics
1112 		 * included in the expression.
1113 		 */
1114 		ret = resolve_metric(metric_no_group,
1115 				     &list, map, &ids);
1116 		if (ret)
1117 			goto out;
1118 	}
1119 
1120 	{
1121 		struct metricgroup_iter_data data = {
1122 			.fn = metricgroup__add_metric_sys_event_iter,
1123 			.data = (void *) &(struct metricgroup_add_iter_data) {
1124 				.metric_list = &list,
1125 				.metric = metric,
1126 				.metric_no_group = metric_no_group,
1127 				.ids = &ids,
1128 				.has_match = &has_match,
1129 				.ret = &ret,
1130 			},
1131 		};
1132 
1133 		pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
1134 	}
1135 	/* End of pmu events. */
1136 	if (!has_match) {
1137 		ret = -EINVAL;
1138 		goto out;
1139 	}
1140 
1141 	list_for_each_entry(m, &list, nd) {
1142 		if (events->len > 0)
1143 			strbuf_addf(events, ",");
1144 
1145 		if (m->has_constraint) {
1146 			metricgroup__add_metric_non_group(events,
1147 							  &m->pctx);
1148 		} else {
1149 			metricgroup__add_metric_weak_group(events,
1150 							   &m->pctx);
1151 		}
1152 	}
1153 
1154 out:
1155 	/*
1156 	 * Add the metrics to metric_list so that they can be
1157 	 * released even if something failed.
1158 	 */
1159 	list_splice(&list, metric_list);
1160 	expr_ids__exit(&ids);
1161 	return ret;
1162 }
1163 
1164 static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
1165 					struct strbuf *events,
1166 					struct list_head *metric_list,
1167 					struct pmu_events_map *map)
1168 {
1169 	char *llist, *nlist, *p;
1170 	int ret = -EINVAL;
1171 
1172 	nlist = strdup(list);
1173 	if (!nlist)
1174 		return -ENOMEM;
1175 	llist = nlist;
1176 
1177 	strbuf_init(events, 100);
1178 	strbuf_addf(events, "%s", "");
1179 
1180 	while ((p = strsep(&llist, ",")) != NULL) {
1181 		ret = metricgroup__add_metric(p, metric_no_group, events,
1182 					      metric_list, map);
1183 		if (ret == -EINVAL) {
1184 			fprintf(stderr, "Cannot find metric or group `%s'\n",
1185 					p);
1186 			break;
1187 		}
1188 	}
1189 	free(nlist);
1190 
1191 	if (!ret)
1192 		metricgroup___watchdog_constraint_hint(NULL, true);
1193 
1194 	return ret;
1195 }
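
/*
 * The 'list' argument parsed above is the comma-separated metric string given
 * on the command line, e.g. (metric names purely illustrative):
 *
 *	perf stat -M IPC,TopdownL1 -- sleep 1
 *
 * Each token may name either a single metric or a whole metric group; a token
 * that matches neither makes the parse fail with -EINVAL.
 */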
1196 
1197 static void metric__free_refs(struct metric *metric)
1198 {
1199 	struct metric_ref_node *ref, *tmp;
1200 
1201 	list_for_each_entry_safe(ref, tmp, &metric->metric_refs, list) {
1202 		list_del(&ref->list);
1203 		free(ref);
1204 	}
1205 }
1206 
1207 static void metricgroup__free_metrics(struct list_head *metric_list)
1208 {
1209 	struct metric *m, *tmp;
1210 
1211 	list_for_each_entry_safe (m, tmp, metric_list, nd) {
1212 		metric__free_refs(m);
1213 		expr__ctx_clear(&m->pctx);
1214 		list_del_init(&m->nd);
1215 		free(m);
1216 	}
1217 }
1218 
1219 static int parse_groups(struct evlist *perf_evlist, const char *str,
1220 			bool metric_no_group,
1221 			bool metric_no_merge,
1222 			struct perf_pmu *fake_pmu,
1223 			struct rblist *metric_events,
1224 			struct pmu_events_map *map)
1225 {
1226 	struct parse_events_error parse_error;
1227 	struct strbuf extra_events;
1228 	LIST_HEAD(metric_list);
1229 	int ret;
1230 
1231 	if (metric_events->nr_entries == 0)
1232 		metricgroup__rblist_init(metric_events);
1233 	ret = metricgroup__add_metric_list(str, metric_no_group,
1234 					   &extra_events, &metric_list, map);
1235 	if (ret)
1236 		goto out;
1237 	pr_debug("adding %s\n", extra_events.buf);
1238 	bzero(&parse_error, sizeof(parse_error));
1239 	ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
1240 	if (ret) {
1241 		parse_events_print_error(&parse_error, extra_events.buf);
1242 		goto out;
1243 	}
1244 	ret = metricgroup__setup_events(&metric_list, metric_no_merge,
1245 					perf_evlist, metric_events);
1246 out:
1247 	metricgroup__free_metrics(&metric_list);
1248 	strbuf_release(&extra_events);
1249 	return ret;
1250 }
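
/*
 * parse_groups() ties the pieces together: metricgroup__add_metric_list()
 * turns the metric string into a list of struct metric plus an event string,
 * __parse_events() turns that string into evsels on the evlist (optionally
 * against a fake PMU for tests), and metricgroup__setup_events() then links
 * the evsels back to the metrics in 'metric_events'.
 */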
1251 
1252 int metricgroup__parse_groups(const struct option *opt,
1253 			      const char *str,
1254 			      bool metric_no_group,
1255 			      bool metric_no_merge,
1256 			      struct rblist *metric_events)
1257 {
1258 	struct evlist *perf_evlist = *(struct evlist **)opt->value;
1259 	struct pmu_events_map *map = pmu_events_map__find();
1260 
1261 	return parse_groups(perf_evlist, str, metric_no_group,
1262 			    metric_no_merge, NULL, metric_events, map);
1263 }
1264 
1265 int metricgroup__parse_groups_test(struct evlist *evlist,
1266 				   struct pmu_events_map *map,
1267 				   const char *str,
1268 				   bool metric_no_group,
1269 				   bool metric_no_merge,
1270 				   struct rblist *metric_events)
1271 {
1272 	return parse_groups(evlist, str, metric_no_group,
1273 			    metric_no_merge, &perf_pmu__fake, metric_events, map);
1274 }
1275 
1276 bool metricgroup__has_metric(const char *metric)
1277 {
1278 	struct pmu_events_map *map = pmu_events_map__find();
1279 	struct pmu_event *pe;
1280 	int i;
1281 
1282 	if (!map)
1283 		return false;
1284 
1285 	for (i = 0; ; i++) {
1286 		pe = &map->table[i];
1287 
1288 		if (!pe->name && !pe->metric_group && !pe->metric_name)
1289 			break;
1290 		if (!pe->metric_expr)
1291 			continue;
1292 		if (match_metric(pe->metric_name, metric))
1293 			return true;
1294 	}
1295 	return false;
1296 }
1297 
1298 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1299 				    struct rblist *new_metric_events,
1300 				    struct rblist *old_metric_events)
1301 {
1302 	unsigned i;
1303 
1304 	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1305 		struct rb_node *nd;
1306 		struct metric_event *old_me, *new_me;
1307 		struct metric_expr *old_expr, *new_expr;
1308 		struct evsel *evsel;
1309 		size_t alloc_size;
1310 		int idx, nr;
1311 
1312 		nd = rblist__entry(old_metric_events, i);
1313 		old_me = container_of(nd, struct metric_event, nd);
1314 
1315 		evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1316 		if (!evsel)
1317 			return -EINVAL;
1318 		new_me = metricgroup__lookup(new_metric_events, evsel, true);
1319 		if (!new_me)
1320 			return -ENOMEM;
1321 
1322 		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1323 			 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1324 
1325 		list_for_each_entry(old_expr, &old_me->head, nd) {
1326 			new_expr = malloc(sizeof(*new_expr));
1327 			if (!new_expr)
1328 				return -ENOMEM;
1329 
1330 			new_expr->metric_expr = old_expr->metric_expr;
1331 			new_expr->metric_name = old_expr->metric_name;
1332 			new_expr->metric_unit = old_expr->metric_unit;
1333 			new_expr->runtime = old_expr->runtime;
1334 
1335 			if (old_expr->metric_refs) {
1336 				/* calculate the number of metric_refs */
1337 				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1338 					continue;
1339 				alloc_size = sizeof(*new_expr->metric_refs);
1340 				new_expr->metric_refs = calloc(nr + 1, alloc_size);
1341 				if (!new_expr->metric_refs) {
1342 					free(new_expr);
1343 					return -ENOMEM;
1344 				}
1345 
1346 				memcpy(new_expr->metric_refs, old_expr->metric_refs,
1347 				       nr * alloc_size);
1348 			} else {
1349 				new_expr->metric_refs = NULL;
1350 			}
1351 
1352 			/* calculate number of metric_events */
1353 			for (nr = 0; old_expr->metric_events[nr]; nr++)
1354 				continue;
1355 			alloc_size = sizeof(*new_expr->metric_events);
1356 			new_expr->metric_events = calloc(nr + 1, alloc_size);
1357 			if (!new_expr->metric_events) {
1358 				free(new_expr->metric_refs);
1359 				free(new_expr);
1360 				return -ENOMEM;
1361 			}
1362 
1363 			/* copy each evsel into the same position */
1364 			for (idx = 0; idx < nr; idx++) {
1365 				evsel = old_expr->metric_events[idx];
1366 				evsel = evlist__find_evsel(evlist, evsel->core.idx);
1367 				if (evsel == NULL) {
1368 					free(new_expr->metric_events);
1369 					free(new_expr->metric_refs);
1370 					free(new_expr);
1371 					return -EINVAL;
1372 				}
1373 				new_expr->metric_events[idx] = evsel;
1374 			}
1375 
1376 			list_add(&new_expr->nd, &new_me->head);
1377 		}
1378 	}
1379 	return 0;
1380 }
1381