// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <api/io_dir.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "ui/ui.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
#include "tracepoint.h"

#define MAX_NAME_LEN 100

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest);

const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias  = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias  = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias  = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias  = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias  = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias  = "",
	},
};

const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias  = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias  = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias  = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias  = "",
	},
	[PERF_COUNT_SW_BPF_OUTPUT] = {
		.symbol = "bpf-output",
		.alias  = "",
	},
	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
		.symbol = "cgroup-switches",
		.alias  = "",
	},
};

static const char *const event_types[] = {
	[PERF_TYPE_HARDWARE]	= "hardware",
	[PERF_TYPE_SOFTWARE]	= "software",
	[PERF_TYPE_TRACEPOINT]	= "tracepoint",
	[PERF_TYPE_HW_CACHE]	= "hardware-cache",
	[PERF_TYPE_RAW]		= "raw",
	[PERF_TYPE_BREAKPOINT]	= "breakpoint",
};

const char *event_type(size_t type)
{
	if (type >= PERF_TYPE_MAX)
		return "unknown";

	return event_types[type];
}

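/*
 * get_config_str - Return the string value of the first term in @head_terms
 * whose type matches @type_term, or NULL if there is none.
 */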
static char *get_config_str(const struct parse_events_terms *head_terms,
			    enum parse_events__term_type type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

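/*
 * get_config_cpu - Build a CPU map from the "cpu" terms in @head_terms. A
 * numeric value names a single CPU while a string value is tried first as a
 * PMU name and then as a CPU list; the CPUs from all terms are merged.
 */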
static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms)
{
	struct parse_events_term *term;
	struct perf_cpu_map *cpus = NULL;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list) {
		if (term->type_term == PARSE_EVENTS__TERM_TYPE_CPU) {
			struct perf_cpu_map *term_cpus;

			if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
				term_cpus = perf_cpu_map__new_int(term->val.num);
			} else {
				struct perf_pmu *pmu = perf_pmus__find(term->val.str);

				if (pmu && perf_cpu_map__is_empty(pmu->cpus))
					term_cpus = pmu->is_core ? cpu_map__online() : NULL;
				else if (pmu)
					term_cpus = perf_cpu_map__get(pmu->cpus);
				else
					term_cpus = perf_cpu_map__new(term->val.str);
			}
			perf_cpu_map__merge(&cpus, term_cpus);
			perf_cpu_map__put(term_cpus);
		}
	}

	return cpus;
}

/**
 * fix_raw - For each raw term, see if there is an event (aka alias) in pmu
 *           that matches the raw's string value. If the string value matches
 *           an event then change the term to be an event; if not then change
 *           it to be a config term. For example, "read" may be an event of
 *           the PMU or a raw hex encoding of 0xead. The fix-up is done late
 *           so the PMU of the event can be determined and we don't need to
 *           scan all PMUs ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &config_terms->terms, list) {
		u64 num;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
			continue;

		if (perf_pmu__have_event(pmu, term->val.str)) {
			zfree(&term->config);
			term->config = term->val.str;
			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->val.num = 1;
			term->no_value = true;
			continue;
		}

		zfree(&term->config);
		term->config = strdup("config");
		errno = 0;
		num = strtoull(term->val.str + 1, NULL, 16);
		assert(errno == 0);
		free(term->val.str);
		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
		term->val.num = num;
		term->no_value = false;
	}
}

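/**
 * __add_event - Allocate a new evsel for @attr and, if @list is given, add it
 *               to the end of the list.
 * @list: the list to add the new evsel to, or NULL to only allocate.
 * @idx: pointer to the evsel's index, incremented once the evsel is created.
 * @attr: the perf_event_attr describing the event.
 * @init_attr: if true, apply event_attr_init to @attr.
 * @name: an optional name for the evsel.
 * @metric_id: an optional metric identifier for the evsel.
 * @pmu: the PMU of the event, determined from @attr if NULL.
 * @config_terms: config terms spliced onto the evsel.
 * @first_wildcard_match: the first evsel matching the same wildcard, if any.
 * @user_cpus: CPUs requested by the user, taking precedence over the PMU's.
 * @alternate_hw_config: an alternate legacy hardware config, or
 *                       PERF_COUNT_HW_MAX when not applicable.
 */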
static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, struct evsel *first_wildcard_match,
	    struct perf_cpu_map *user_cpus, u64 alternate_hw_config)
{
	struct evsel *evsel;
	bool is_pmu_core;
	struct perf_cpu_map *cpus, *pmu_cpus;
	bool has_user_cpus = !perf_cpu_map__is_empty(user_cpus);

	/*
	 * Ensure the first_wildcard_match's PMU matches that of the new event
	 * being added. Otherwise try to match with another event further down
	 * the evlist.
	 */
	if (first_wildcard_match) {
		struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);

		first_wildcard_match = NULL;
		list_for_each_entry_continue(pos, list, core.node) {
			if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
				first_wildcard_match = pos;
				break;
			}
			if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
				first_wildcard_match = pos;
				break;
			}
		}
	}

	if (pmu) {
		perf_pmu__warn_invalid_formats(pmu);
		if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
			perf_pmu__warn_invalid_config(pmu, attr->config, name,
						PERF_PMU_FORMAT_VALUE_CONFIG, "config");
			perf_pmu__warn_invalid_config(pmu, attr->config1, name,
						PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
			perf_pmu__warn_invalid_config(pmu, attr->config2, name,
						PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
			perf_pmu__warn_invalid_config(pmu, attr->config3, name,
						PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
		}
	}
	/*
	 * If a PMU wasn't given, such as for legacy events, find it now;
	 * at this point no warnings will be generated for it.
	 */
	if (!pmu)
		pmu = perf_pmus__find_by_attr(attr);

	if (pmu) {
		is_pmu_core = pmu->is_core;
		pmu_cpus = perf_cpu_map__get(pmu->cpus);
		if (perf_cpu_map__is_empty(pmu_cpus))
			pmu_cpus = cpu_map__online();
	} else {
		is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
			       attr->type == PERF_TYPE_HW_CACHE);
		pmu_cpus = is_pmu_core ? cpu_map__online() : NULL;
	}

	if (has_user_cpus)
		cpus = perf_cpu_map__get(user_cpus);
	else
		cpus = perf_cpu_map__get(pmu_cpus);

	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel)
		goto out_err;

	if (name) {
		evsel->name = strdup(name);
		if (!evsel->name)
			goto out_err;
	}

	if (metric_id) {
		evsel->metric_id = strdup(metric_id);
		if (!evsel->metric_id)
			goto out_err;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.pmu_cpus = pmu_cpus;
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->core.is_pmu_core = is_pmu_core;
	evsel->pmu = pmu;
	evsel->alternate_hw_config = alternate_hw_config;
	evsel->first_wildcard_match = first_wildcard_match;

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	if (has_user_cpus)
		evsel__warn_user_requested_cpus(evsel, user_cpus);

	return evsel;
out_err:
	perf_cpu_map__put(cpus);
	perf_cpu_map__put(pmu_cpus);
	if (evsel) {
		zfree(&evsel->name);
		zfree(&evsel->metric_id);
		free(evsel);
	}
	return NULL;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms,
		     u64 alternate_hw_config)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   alternate_hw_config) ? 0 : -ENOMEM;
}

/**
 * parse_aliases - search names for entries beginning with or equalling str,
 *                 ignoring case. If multiple entries in names match str then
 *                 the longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. For
 *                                     example, "L1-dcache-load-misses" has
 *                                     type "L1-dcache", op "load" and result
 *                                     "misses". To make life hard, the names
 *                                     in the table also contain hyphens and
 *                                     the longest name should always be
 *                                     selected.
 * @name: The name to decode.
 * @extended_pmu_type: The PMU type, encoded into the config's extended type
 *                     bits when supported.
 * @config: Out argument for the decoded config value.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}

/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match, u64 alternate_hw_config);

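/*
 * parse_events_add_cache - Add a legacy cache event, such as
 * "L1-dcache-load-misses", for each PMU where it is valid, preferring a
 * PMU's own event of the same name over the legacy encoding.
 */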
int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
			   struct parse_events_state *parse_state,
			   struct parse_events_terms *parsed_terms)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;
	const char *config_name = get_config_name(parsed_terms);
	const char *metric_id = get_config_metric_id(parsed_terms);
	struct perf_cpu_map *cpus = get_config_cpu(parsed_terms);
	int ret = 0;
	struct evsel *first_wildcard_match = NULL;

	while ((pmu = perf_pmus__scan_for_event(pmu, name)) != NULL) {
		LIST_HEAD(config_terms);
		struct perf_event_attr attr;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (perf_pmu__have_event(pmu, name)) {
			/*
			 * The PMU has the event, so add it as a PMU event
			 * rather than a legacy cache event.
			 */
			ret = parse_events_add_pmu(parse_state, list, pmu,
						   parsed_terms,
						   first_wildcard_match,
						   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
			if (ret)
				goto out_err;
			if (first_wildcard_match == NULL)
				first_wildcard_match =
					container_of(list->prev, struct evsel, core.node);
			continue;
		}

		if (!pmu->is_core) {
			/* Legacy cache events are only supported by core PMUs. */
			continue;
		}

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HW_CACHE;

		ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
		if (ret)
			return ret;

		found_supported = true;

		if (parsed_terms) {
			if (config_attr(&attr, parsed_terms, parse_state->error,
					config_term_common)) {
				ret = -EINVAL;
				goto out_err;
			}
			if (get_config_terms(parsed_terms, &config_terms)) {
				ret = -ENOMEM;
				goto out_err;
			}
		}

		if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
				metric_id, pmu, &config_terms, first_wildcard_match,
				cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
			ret = -ENOMEM;

		if (first_wildcard_match == NULL)
			first_wildcard_match = container_of(list->prev, struct evsel, core.node);
		free_config_terms(&config_terms);
		if (ret)
			goto out_err;
	}
out_err:
	perf_cpu_map__put(cpus);
	return found_supported ? 0 : -EINVAL;
}

static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name, int column)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get the error directly from the syscall errno (> 0),
	 * or from an encoded pointer's error (< 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, column, strdup(str), strdup(help));
}

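/*
 * add_tracepoint - Create an evsel for the tracepoint sys_name:evt_name and
 * add it to @list, applying any config terms in @head_config.
 */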
static int add_tracepoint(struct parse_events_state *parse_state,
			  struct list_head *list,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
					       !parse_state->fake_tp);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

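/*
 * add_tracepoint_multi_event - Add an evsel for each tracepoint under
 * sys_name whose name glob-matches evt_name, reporting ENOENT if nothing
 * matched.
 */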
static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
				      struct list_head *list,
				      const char *sys_name, const char *evt_name,
				      struct parse_events_error *err,
				      struct parse_events_terms *head_config, YYLTYPE *loc)
{
	char *evt_path;
	struct io_dirent64 *evt_ent;
	struct io_dir evt_dir;
	int ret = 0, found = 0;

	evt_path = get_events_file(sys_name);
	if (!evt_path) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}
	io_dir__init(&evt_dir, open(evt_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	if (evt_dir.dirfd < 0) {
		put_events_file(evt_path);
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (evt_ent = io_dir__readdir(&evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		found++;

		ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
				     err, head_config, loc);
	}

	if (!found) {
		tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
		ret = -1;
	}

	put_events_file(evt_path);
	close(evt_dir.dirfd);
	return ret;
}

static int add_tracepoint_event(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys_name, const char *evt_name,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, YYLTYPE *loc)
{
	return strpbrk(evt_name, "*?") ?
		add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
					   err, head_config, loc) :
		add_tracepoint(parse_state, list, sys_name, evt_name,
			       err, head_config, loc);
}

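/*
 * add_tracepoint_multi_sys - Add tracepoint events for each events subsystem
 * directory whose name glob-matches sys_name.
 */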
static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
				    struct list_head *list,
				    const char *sys_name, const char *evt_name,
				    struct parse_events_error *err,
				    struct parse_events_terms *head_config, YYLTYPE *loc)
{
	struct io_dirent64 *events_ent;
	struct io_dir events_dir;
	int ret = 0;
	char *events_dir_path = get_tracing_file("events");

	if (!events_dir_path) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}
	io_dir__init(&events_dir, open(events_dir_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	put_events_file(events_dir_path);
	if (events_dir.dirfd < 0) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (events_ent = io_dir__readdir(&events_dir))) {
		if (!strcmp(events_ent->d_name, ".")
		    || !strcmp(events_ent->d_name, "..")
		    || !strcmp(events_ent->d_name, "enable")
		    || !strcmp(events_ent->d_name, "header_event")
		    || !strcmp(events_ent->d_name, "header_page"))
			continue;

		if (!strglobmatch(events_ent->d_name, sys_name))
			continue;

		ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
					   evt_name, err, head_config, loc);
	}
	close(events_dir.dirfd);
	return ret;
}

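/*
 * default_breakpoint_len - Default length of an execution breakpoint: the
 * kernel's word size on i386 (where the kernel may be 64-bit even if perf is
 * 32-bit), 4 bytes on arm64 and sizeof(long) elsewhere.
 */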
size_t default_breakpoint_len(void)
{
#if defined(__i386__)
	static int len;

	if (len == 0) {
		struct perf_env env = {};

		perf_env__init(&env);
		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
		perf_env__exit(&env);
	}
	return len;
#elif defined(__aarch64__)
	return 4;
#else
	return sizeof(long);
#endif
}

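/*
 * parse_breakpoint_type - Parse up to three 'r', 'w' and 'x' modifiers into
 * attr->bp_type, rejecting any character that is repeated or unknown. With
 * no modifiers, the type defaults to read|write.
 */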
static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
				struct list_head *list,
				u64 addr, char *type, u64 len,
				struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = default_breakpoint_len();
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);

	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
			&config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

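/*
 * check_type_val - Report an error via @err and fail if @term's value is not
 * of the expected @type (numeric or string).
 */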
static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  enum parse_events__term_val_type type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					type == PARSE_EVENTS__TERM_TYPE_NUM
					? strdup("expected numeric value")
					: strdup("expected string value"),
					NULL);
	}
	return -EINVAL;
}

static bool config_term_shrinked;

const char *parse_events__term_type_str(enum parse_events__term_type term_type)
{
	/*
	 * Update according to parse-events.l
	 */
	static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
		[PARSE_EVENTS__TERM_TYPE_USER]			= "<sysfs term>",
		[PARSE_EVENTS__TERM_TYPE_CONFIG]		= "config",
		[PARSE_EVENTS__TERM_TYPE_CONFIG1]		= "config1",
		[PARSE_EVENTS__TERM_TYPE_CONFIG2]		= "config2",
		[PARSE_EVENTS__TERM_TYPE_CONFIG3]		= "config3",
		[PARSE_EVENTS__TERM_TYPE_NAME]			= "name",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]		= "period",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]		= "freq",
		[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]	= "branch_type",
		[PARSE_EVENTS__TERM_TYPE_TIME]			= "time",
		[PARSE_EVENTS__TERM_TYPE_CALLGRAPH]		= "call-graph",
		[PARSE_EVENTS__TERM_TYPE_STACKSIZE]		= "stack-size",
		[PARSE_EVENTS__TERM_TYPE_NOINHERIT]		= "no-inherit",
		[PARSE_EVENTS__TERM_TYPE_INHERIT]		= "inherit",
		[PARSE_EVENTS__TERM_TYPE_MAX_STACK]		= "max-stack",
		[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]		= "nr",
		[PARSE_EVENTS__TERM_TYPE_OVERWRITE]		= "overwrite",
		[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]		= "no-overwrite",
		[PARSE_EVENTS__TERM_TYPE_DRV_CFG]		= "driver-config",
		[PARSE_EVENTS__TERM_TYPE_PERCORE]		= "percore",
		[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]		= "aux-output",
		[PARSE_EVENTS__TERM_TYPE_AUX_ACTION]		= "aux-action",
		[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]	= "aux-sample-size",
		[PARSE_EVENTS__TERM_TYPE_METRIC_ID]		= "metric-id",
		[PARSE_EVENTS__TERM_TYPE_RAW]			= "raw",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE]		= "legacy-cache",
		[PARSE_EVENTS__TERM_TYPE_HARDWARE]		= "hardware",
		[PARSE_EVENTS__TERM_TYPE_CPU]			= "cpu",
	};
	if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
		return "unknown term";

	return config_term_names[term_type];
}

static bool
config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
		return true;
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     parse_events__term_type_str(term_type)) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

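/*
 * config_term_common - Apply a single parsed term to @attr, validating the
 * term's value type and range. Shared by legacy events and, via
 * config_term_pmu, PMU events.
 */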
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)						   \
do {									   \
	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
		return -EINVAL;						   \
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				    &attr->branch_sample_type)) {
			parse_events_error__handle(err, term->err_val,
					strdup("invalid branch sample type"),
					NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						strdup("expected 0 or 1"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_RAW:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						strdup("expected 0 or 1"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(err, term->err_val,
						strdup("too big"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CPU: {
		struct perf_cpu_map *map;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
				parse_events_error__handle(err, term->err_val,
							strdup("too big"),
							/*help=*/NULL);
				return -EINVAL;
			}
			break;
		}
		assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
		if (perf_pmus__find(term->val.str) != NULL)
			break;

		map = perf_cpu_map__new(term->val.str);
		if (!map) {
			parse_events_error__handle(err, term->err_val,
						   strdup("not a valid PMU or CPU number"),
						   /*help=*/NULL);
			return -EINVAL;
		}
		perf_cpu_map__put(map);
		break;
	}
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		parse_events_error__handle(err, term->err_term,
					strdup(parse_events__term_type_str(term->type_term)),
					parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after basic checking so
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If we checked availability at the entry of this function, the
	 * user would see "'<sysfs term>' is not usable in 'perf stat'"
	 * whenever an invalid config term is provided for a legacy event
	 * (for example, instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, err))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

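/*
 * config_term_pmu - Apply a term to @attr for a PMU event, rewriting
 * legacy-cache and hardware terms as needed and passing the rest to
 * config_term_common. Sysfs terms always succeed here as their type isn't
 * known yet.
 */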
static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_error *err)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * Rewrite the PMU event to a legacy cache one unless the PMU
		 * doesn't support legacy cache events or the event is present
		 * within the PMU.
		 */
		if (perf_pmu__supports_legacy_cache(pmu) &&
		    !perf_pmu__have_event(pmu, term->config)) {
			attr->type = PERF_TYPE_HW_CACHE;
			return parse_events__decode_legacy_cache(term->config, pmu->type,
								 &attr->config);
		} else {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
		}
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * If the PMU has a sysfs or json event, prefer it over
		 * legacy. ARM requires this.
		 */
		if (perf_pmu__have_event(pmu, term->config)) {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
			term->alternate_hw_config = true;
		} else {
			attr->type = PERF_TYPE_HARDWARE;
			attr->config = term->val.num;
			if (perf_pmus__supports_extended_type())
				attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
		}
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, err);
}

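/*
 * config_term_tracepoint - Apply a term to a tracepoint event's @attr. Only
 * a subset of terms is valid for tracepoints; anything else is rejected.
 */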
static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
					strdup(parse_events__term_type_str(term->type_term)),
					strdup("valid terms: call-graph,stack-size\n")
				);
		}
		return -EINVAL;
	}

	return 0;
}

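/*
 * config_attr - Apply each term in @head to @attr via @config_term, failing
 * on the first term that doesn't apply.
 */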
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &head->terms, list)
		if (config_term(attr, term, err))
			return -EINVAL;

	return 0;
}

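/*
 * get_config_terms - Translate parsed terms into evsel_config_terms appended
 * to @head_terms. Terms that only configure the perf_event_attr are skipped
 * here, having been handled by config_attr.
 */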
static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type       = EVSEL__CONFIG_TERM_ ## __type;	\
	__t->weak	= __weak;				\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
			ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_USER:
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}
	return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;

	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(parse_state, list, sys, event,
						err, head_config, loc);
	else
		return add_tracepoint_event(parse_state, list, sys, event,
					    err, head_config, loc);
}

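/*
 * __parse_events_add_numeric - Add an event described by a numeric
 * type/config pair (a legacy hardware, software or raw event), applying any
 * terms in @head_config.
 */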
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				struct list_head *list,
				struct perf_pmu *pmu, u32 type, u32 extended_type,
				u64 config, const struct parse_events_terms *head_config,
				struct evsel *first_wildcard_match)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	struct perf_cpu_map *cpus;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	cpus = get_config_cpu(head_config);
	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
			metric_id, pmu, &config_terms, first_wildcard_match,
			cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
	perf_cpu_map__put(cpus);
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     const struct parse_events_terms *head_config,
			     bool wildcard)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;

	/* Wildcards on numeric values are only supported by core PMUs. */
	if (wildcard && perf_pmus__supports_extended_type()) {
		struct evsel *first_wildcard_match = NULL;
		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			int ret;

			found_supported = true;
			if (parse_events__filter_pmu(parse_state, pmu))
				continue;

			ret = __parse_events_add_numeric(parse_state, list, pmu,
							 type, pmu->type,
							 config, head_config,
							 first_wildcard_match);
			if (ret)
				return ret;
			if (first_wildcard_match == NULL)
				first_wildcard_match =
					container_of(list->prev, struct evsel, core.node);
		}
		if (found_supported)
			return 0;
	}
	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
					type, /*extended_type=*/0, config, head_config,
					/*first_wildcard_match=*/NULL);
}

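/*
 * config_term_percore - Return the value of the first percore term in
 * @config_terms, or false if there is none.
 */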
static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

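/*
 * parse_events_add_pmu - Add an evsel for an event on @pmu, resolving any
 * aliases in @const_parsed_terms and applying the result to the
 * perf_event_attr and the evsel's config terms.
 */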
static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match, u64 alternate_hw_config)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);
	struct parse_events_terms parsed_terms;
	bool alias_rewrote_terms = false;
	struct perf_cpu_map *term_cpu = NULL;

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		if (pmu->selectable && const_parsed_terms &&
		    list_empty(&const_parsed_terms->terms)) {
			strbuf_addf(&sb, "%s//", pmu->name);
		} else {
			strbuf_addf(&sb, "%s/", pmu->name);
			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
			strbuf_addch(&sb, '/');
		}
		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
		strbuf_release(&sb);
	}

	memset(&attr, 0, sizeof(attr));
	if (pmu->perf_event_attr_init_default)
		pmu->perf_event_attr_init_default(pmu, &attr);

	attr.type = pmu->type;

	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, first_wildcard_match,
				    /*cpu_list=*/NULL, alternate_hw_config);
		return evsel ? 0 : -ENOMEM;
	}

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}
	fix_raw(&parsed_terms, pmu);

	/* Configure attr/terms with a known PMU; this will set hardcoded terms. */
	if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	/* Look for event names in the terms and rewrite into format-based terms. */
	if (perf_pmu__check_alias(pmu, &parsed_terms,
				  &info, &alias_rewrote_terms,
				  &alternate_hw_config, err)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		parse_events_terms__to_strbuf(&parsed_terms, &sb);
		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
		strbuf_release(&sb);
	}

	/* Configure attr/terms again if an alias was expanded. */
	if (alias_rewrote_terms &&
	    config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (get_config_terms(&parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->perf_event_attr_init_default &&
	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/* Skip configuring hardcoded terms that were applied by config_attr. */
	if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
			     parse_state->error)) {
		free_config_terms(&config_terms);
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	term_cpu = get_config_cpu(&parsed_terms);
	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(&parsed_terms),
			    get_config_metric_id(&parsed_terms), pmu,
			    &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
	perf_cpu_map__put(term_cpu);
	if (!evsel) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->percore = config_term_percore(&evsel->config_terms);

	parse_events_terms__exit(&parsed_terms);
	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	evsel->retirement_latency.mean = info.retirement_latency_mean;
	evsel->retirement_latency.min = info.retirement_latency_min;
	evsel->retirement_latency.max = info.retirement_latency_max;

	return 0;
}

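/*
 * parse_events_multi_pmu_add - Add an event, typically a json or sysfs
 * alias, for every PMU that advertises @event_name, returning the new list
 * of evsels via @listp.
 */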
1693 int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
1694 			       const char *event_name, u64 hw_config,
1695 			       const struct parse_events_terms *const_parsed_terms,
1696 			       struct list_head **listp, void *loc_)
1697 {
1698 	struct parse_events_term *term;
1699 	struct list_head *list = NULL;
1700 	struct perf_pmu *pmu = NULL;
1701 	YYLTYPE *loc = loc_;
1702 	int ok = 0;
1703 	const char *config;
1704 	struct parse_events_terms parsed_terms;
1705 	struct evsel *first_wildcard_match = NULL;
1706 
1707 	*listp = NULL;
1708 
1709 	parse_events_terms__init(&parsed_terms);
1710 	if (const_parsed_terms) {
1711 		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
1712 
1713 		if (ret)
1714 			return ret;
1715 	}
1716 
1717 	config = strdup(event_name);
1718 	if (!config)
1719 		goto out_err;
1720 
1721 	if (parse_events_term__num(&term,
1722 				   PARSE_EVENTS__TERM_TYPE_USER,
1723 				   config, /*num=*/1, /*novalue=*/true,
1724 				   loc, /*loc_val=*/NULL) < 0) {
1725 		zfree(&config);
1726 		goto out_err;
1727 	}
1728 	list_add_tail(&term->list, &parsed_terms.terms);
1729 
1730 	/* Add it for all PMUs that support the alias */
1731 	list = malloc(sizeof(struct list_head));
1732 	if (!list)
1733 		goto out_err;
1734 
1735 	INIT_LIST_HEAD(list);
1736 
1737 	while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) {
1738 
1739 		if (parse_events__filter_pmu(parse_state, pmu))
1740 			continue;
1741 
1742 		if (!perf_pmu__have_event(pmu, event_name))
1743 			continue;
1744 
1745 		if (!parse_events_add_pmu(parse_state, list, pmu,
1746 					  &parsed_terms, first_wildcard_match, hw_config)) {
1747 			struct strbuf sb;
1748 
1749 			strbuf_init(&sb, /*hint=*/ 0);
1750 			parse_events_terms__to_strbuf(&parsed_terms, &sb);
1751 			pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
1752 			strbuf_release(&sb);
1753 			ok++;
1754 		}
1755 		if (first_wildcard_match == NULL && !list_empty(list))
1756 			first_wildcard_match = container_of(list->prev, struct evsel, core.node);
1757 	}
1758 
1759 	if (parse_state->fake_pmu) {
1760 		if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
1761 					 first_wildcard_match, hw_config)) {
1762 			struct strbuf sb;
1763 
1764 			strbuf_init(&sb, /*hint=*/ 0);
1765 			parse_events_terms__to_strbuf(&parsed_terms, &sb);
1766 			pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
1767 			strbuf_release(&sb);
1768 			ok++;
1769 		}
1770 	}
1771 
1772 out_err:
1773 	parse_events_terms__exit(&parsed_terms);
1774 	if (ok)
1775 		*listp = list;
1776 	else
1777 		free(list);
1778 
1779 	return ok ? 0 : -1;
1780 }
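
/*
 * For example (hypothetical event name), "perf stat -e inst_retired.any" on a
 * hybrid system adds one evsel per PMU that advertises the event, e.g.
 * cpu_core and cpu_atom, with the first successfully added evsel becoming
 * first_wildcard_match for the later additions.
 */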
1781 
1782 int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
1783 					const char *event_or_pmu,
1784 					const struct parse_events_terms *const_parsed_terms,
1785 					struct list_head **listp,
1786 					void *loc_)
1787 {
1788 	YYLTYPE *loc = loc_;
1789 	struct perf_pmu *pmu;
1790 	int ok = 0;
1791 	char *help;
1792 	struct evsel *first_wildcard_match = NULL;
1793 
1794 	*listp = malloc(sizeof(**listp));
1795 	if (!*listp)
1796 		return -ENOMEM;
1797 
1798 	INIT_LIST_HEAD(*listp);
1799 
1800 	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
1801 	pmu = perf_pmus__find(event_or_pmu);
1802 	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
1803 					 first_wildcard_match,
1804 					 /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
1805 		return 0;
1806 
1807 	if (parse_state->fake_pmu) {
1808 		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
1809 					  const_parsed_terms,
1810 					  first_wildcard_match,
1811 					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
1812 			return 0;
1813 	}
1814 
1815 	pmu = NULL;
1816 	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
1817 	while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {
1818 
1819 		if (parse_events__filter_pmu(parse_state, pmu))
1820 			continue;
1821 
1822 		if (!parse_events_add_pmu(parse_state, *listp, pmu,
1823 					  const_parsed_terms,
1824 					  first_wildcard_match,
1825 					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
1826 			ok++;
1827 			parse_state->wild_card_pmus = true;
1828 		}
1829 		if (first_wildcard_match == NULL && !list_empty(*listp)) {
1830 			first_wildcard_match =
1831 				container_of((*listp)->prev, struct evsel, core.node);
1832 		}
1833 	}
1834 	if (ok)
1835 		return 0;
1836 
1837 	/* Failed to add, so assume event_or_pmu is an event name. */
1838 	zfree(listp);
1839 	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
1840 					const_parsed_terms, listp, loc))
1841 		return 0;
1842 
1843 	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
1844 		help = NULL;
1845 	parse_events_error__handle(parse_state->error, loc->first_column,
1846 				strdup("Bad event or PMU"),
1847 				help);
1848 	zfree(listp);
1849 	return -EINVAL;
1850 }
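
/*
 * Resolution order above, e.g. (illustrative): "cpu" in "cpu/cycles/" is
 * found directly by perf_pmus__find(); a glob like "uncore_imc_*" is tried
 * against all PMU names; failing both, the token is assumed to be an event
 * name and handed to parse_events_multi_pmu_add().
 */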
1851 
1852 void parse_events__set_leader(char *name, struct list_head *list)
1853 {
1854 	struct evsel *leader;
1855 
1856 	if (list_empty(list)) {
1857 		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1858 		return;
1859 	}
1860 
1861 	leader = list_first_entry(list, struct evsel, core.node);
1862 	__perf_evlist__set_leader(list, &leader->core);
1863 	zfree(&leader->group_name);
1864 	leader->group_name = name;
1865 }
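
/*
 * A sketch of use (group syntax per the event grammar): for
 * "{cycles,instructions}" the first event, cycles, becomes the group leader;
 * a preceding name, as in "mygroup{cycles,instructions}", is stored as the
 * leader's group_name here.
 */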
1866 
1867 static int parse_events__modifier_list(struct parse_events_state *parse_state,
1868 				       YYLTYPE *loc,
1869 				       struct list_head *list,
1870 				       struct parse_events_modifier mod,
1871 				       bool group)
1872 {
1873 	struct evsel *evsel;
1874 
1875 	if (!group && mod.weak) {
1876 		parse_events_error__handle(parse_state->error, loc->first_column,
1877 					   strdup("Weak modifier is for use with groups"), NULL);
1878 		return -EINVAL;
1879 	}
1880 
1881 	__evlist__for_each_entry(list, evsel) {
1882 		/* Translate modifiers into the equivalent evsel excludes. */
1883 		int eu = group ? evsel->core.attr.exclude_user : 0;
1884 		int ek = group ? evsel->core.attr.exclude_kernel : 0;
1885 		int eh = group ? evsel->core.attr.exclude_hv : 0;
1886 		int eH = group ? evsel->core.attr.exclude_host : 0;
1887 		int eG = group ? evsel->core.attr.exclude_guest : 0;
1888 		int exclude = eu | ek | eh;
1889 		int exclude_GH = eG | eH;
1890 
1891 		if (mod.user) {
1892 			if (!exclude)
1893 				exclude = eu = ek = eh = 1;
1894 			eu = 0;
1895 		}
1896 		if (mod.kernel) {
1897 			if (!exclude)
1898 				exclude = eu = ek = eh = 1;
1899 			ek = 0;
1900 		}
1901 		if (mod.hypervisor) {
1902 			if (!exclude)
1903 				exclude = eu = ek = eh = 1;
1904 			eh = 0;
1905 		}
1906 		if (mod.guest) {
1907 			if (!exclude_GH)
1908 				exclude_GH = eG = eH = 1;
1909 			eG = 0;
1910 		}
1911 		if (mod.host) {
1912 			if (!exclude_GH)
1913 				exclude_GH = eG = eH = 1;
1914 			eH = 0;
1915 		}
1916 		if (!exclude_GH && exclude_GH_default) {
1917 			if (perf_host)
1918 				eG = 1;
1919 			else if (perf_guest)
1920 				eH = 1;
1921 		}
1922 
1923 		evsel->core.attr.exclude_user   = eu;
1924 		evsel->core.attr.exclude_kernel = ek;
1925 		evsel->core.attr.exclude_hv     = eh;
1926 		evsel->core.attr.exclude_host   = eH;
1927 		evsel->core.attr.exclude_guest  = eG;
1928 		evsel->exclude_GH               = exclude_GH;
1929 
1930 		/* Simple modifiers copied to the evsel. */
1931 		if (mod.precise) {
1932 			u8 precise = evsel->core.attr.precise_ip + mod.precise;
1933 			/*
1934 			 * precise ip:
1935 			 *
1936 			 *  0 - SAMPLE_IP can have arbitrary skid
1937 			 *  1 - SAMPLE_IP must have constant skid
1938 			 *  2 - SAMPLE_IP requested to have 0 skid
1939 			 *  3 - SAMPLE_IP must have 0 skid
1940 			 *
1941 			 *  See also PERF_RECORD_MISC_EXACT_IP
1942 			 */
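			/*
			 * e.g. "cycles:pp" gives mod.precise == 2; combined
			 * with an attr that already requested precision, the
			 * sum above may exceed 3 and is rejected below.
			 */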
1943 			if (precise > 3) {
1944 				char *help;
1945 
1946 				if (asprintf(&help,
1947 					     "Maximum combined precise value is 3, adding precision to \"%s\"",
1948 					     evsel__name(evsel)) > 0) {
1949 					parse_events_error__handle(parse_state->error,
1950 								   loc->first_column,
1951 								   help, NULL);
1952 				}
1953 				return -EINVAL;
1954 			}
1955 			evsel->core.attr.precise_ip = precise;
1956 		}
1957 		if (mod.precise_max)
1958 			evsel->precise_max = 1;
1959 		if (mod.non_idle)
1960 			evsel->core.attr.exclude_idle = 1;
1961 		if (mod.sample_read)
1962 			evsel->sample_read = 1;
1963 		if (mod.pinned && evsel__is_group_leader(evsel))
1964 			evsel->core.attr.pinned = 1;
1965 		if (mod.exclusive && evsel__is_group_leader(evsel))
1966 			evsel->core.attr.exclusive = 1;
1967 		if (mod.weak)
1968 			evsel->weak_group = true;
1969 		if (mod.bpf)
1970 			evsel->bpf_counter = true;
1971 		if (mod.retire_lat)
1972 			evsel->retire_lat = true;
1973 	}
1974 	return 0;
1975 }
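
/*
 * For example, "cycles:u" leaves exclude_user clear but sets exclude_kernel
 * and exclude_hv, while plain "cycles" keeps all three clear; for groups the
 * existing excludes of each member seed the computation above.
 */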
1976 
1977 int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
1978 				 struct list_head *list,
1979 				 struct parse_events_modifier mod)
1980 {
1981 	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
1982 }
1983 
1984 int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
1985 				 struct list_head *list,
1986 				 struct parse_events_modifier mod)
1987 {
1988 	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
1989 }
1990 
1991 int parse_events__set_default_name(struct list_head *list, char *name)
1992 {
1993 	struct evsel *evsel;
1994 	bool used_name = false;
1995 
1996 	__evlist__for_each_entry(list, evsel) {
1997 		if (!evsel->name) {
1998 			evsel->name = used_name ? strdup(name) : name;
1999 			used_name = true;
2000 			if (!evsel->name)
2001 				return -ENOMEM;
2002 		}
2003 	}
2004 	if (!used_name)
2005 		free(name);
2006 	return 0;
2007 }
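
/*
 * Ownership note: the first unnamed evsel takes 'name' itself, later unnamed
 * evsels get strdup() copies, and 'name' is freed if no evsel lacked a name.
 */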
2008 
2009 static int parse_events__scanner(const char *str,
2010 				 FILE *input,
2011 				 struct parse_events_state *parse_state)
2012 {
2013 	YY_BUFFER_STATE buffer;
2014 	void *scanner;
2015 	int ret;
2016 
2017 	ret = parse_events_lex_init_extra(parse_state, &scanner);
2018 	if (ret)
2019 		return ret;
2020 
2021 	if (str)
2022 		buffer = parse_events__scan_string(str, scanner);
2023 	else
2024 		parse_events_set_in(input, scanner);
2025 
2026 #ifdef PARSER_DEBUG
2027 	parse_events_debug = 1;
2028 	parse_events_set_debug(1, scanner);
2029 #endif
2030 	ret = parse_events_parse(parse_state, scanner);
2031 
2032 	if (str) {
2033 		parse_events__flush_buffer(buffer, scanner);
2034 		parse_events__delete_buffer(buffer, scanner);
2035 	}
2036 	parse_events_lex_destroy(scanner);
2037 	return ret;
2038 }
2039 
2040 /*
2041  * Parse an event config string and return a list of event terms.
2042  */
2043 int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
2044 {
2045 	struct parse_events_state parse_state = {
2046 		.terms  = NULL,
2047 		.stoken = PE_START_TERMS,
2048 	};
2049 	int ret;
2050 
2051 	ret = parse_events__scanner(str, input, &parse_state);
2052 	if (!ret)
2053 		list_splice(&parse_state.terms->terms, &terms->terms);
2054 
2055 	zfree(&parse_state.terms);
2056 	return ret;
2057 }
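
/*
 * A minimal usage sketch (hypothetical caller):
 *
 *	struct parse_events_terms terms;
 *
 *	parse_events_terms__init(&terms);
 *	if (!parse_events_terms(&terms, "period=0x1000,name=foo", NULL))
 *		... iterate terms.terms ...
 *	parse_events_terms__exit(&terms);
 */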
2058 
2059 static int evsel__compute_group_pmu_name(struct evsel *evsel,
2060 					  const struct list_head *head)
2061 {
2062 	struct evsel *leader = evsel__leader(evsel);
2063 	struct evsel *pos;
2064 	const char *group_pmu_name;
2065 	struct perf_pmu *pmu = evsel__find_pmu(evsel);
2066 
2067 	if (!pmu) {
2068 		/*
2069 		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
2070 		 * is a core PMU, but in heterogeneous systems this is
2071 		 * unknown. For now pick the first core PMU.
2072 		 */
2073 		pmu = perf_pmus__scan_core(NULL);
2074 	}
2075 	if (!pmu) {
2076 		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
2077 		return -EINVAL;
2078 	}
2079 	group_pmu_name = pmu->name;
2080 	/*
2081 	 * Software events may be in a group with other uncore PMU events. Use
2082 	 * the pmu_name of the first non-software event to avoid breaking the
2083 	 * software event out of the group.
2084 	 *
2085 	 * Aux event leaders, like intel_pt, expect a group with events from
2086 	 * other PMUs, so substitute the AUX event's PMU in this case.
2087 	 */
2088 	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
2089 		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);
2090 
2091 		if (!leader_pmu) {
2092 			/* As with determining pmu above. */
2093 			leader_pmu = perf_pmus__scan_core(NULL);
2094 		}
2095 		/*
2096 		 * Starting with the leader, find the first event with a named
2097 		 * non-software PMU. for_each_group_(member|evsel) isn't used
2098 		 * because the list isn't yet sorted, so evsels in the same
2099 		 * group aren't adjacent yet.
2100 		 */
2101 		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
2102 			group_pmu_name = leader_pmu->name;
2103 		} else if (leader->core.nr_members > 1) {
2104 			list_for_each_entry(pos, head, core.node) {
2105 				struct perf_pmu *pos_pmu;
2106 
2107 				if (pos == leader || evsel__leader(pos) != leader)
2108 					continue;
2109 				pos_pmu = evsel__find_pmu(pos);
2110 				if (!pos_pmu) {
2111 					/* As with determining pmu above. */
2112 					pos_pmu = perf_pmus__scan_core(NULL);
2113 				}
2114 				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
2115 					group_pmu_name = pos_pmu->name;
2116 					break;
2117 				}
2118 			}
2119 		}
2120 	}
2121 	/* Record computed name. */
2122 	evsel->group_pmu_name = strdup(group_pmu_name);
2123 	return evsel->group_pmu_name ? 0 : -ENOMEM;
2124 }
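
/*
 * For example (illustrative), in "{uncore_imc_0/cas_count_read/,dummy}" the
 * software dummy event is given group_pmu_name "uncore_imc_0" so that
 * sorting doesn't break it out of the group.
 */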
2125 
2126 __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
2127 {
2128 	/* Order by insertion index. */
2129 	return lhs->core.idx - rhs->core.idx;
2130 }
2131 
2132 static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
2133 {
2134 	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
2135 	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
2136 	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
2137 	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
2138 	int *force_grouped_idx = _fg_idx;
2139 	int lhs_sort_idx, rhs_sort_idx, ret;
2140 	const char *lhs_pmu_name, *rhs_pmu_name;
2141 
2142 	/*
2143 	 * Get the indexes of the two events to sort. If the events are
2144 	 * in groups then the leader's index is used, otherwise the
2145 	 * event's index is used. An index may be forced for events that
2146 	 * must be in the same group, namely Intel topdown events.
2147 	 */
2148 	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
2149 		lhs_sort_idx = *force_grouped_idx;
2150 	} else {
2151 		bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;
2152 
2153 		lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
2154 	}
2155 	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
2156 		rhs_sort_idx = *force_grouped_idx;
2157 	} else {
2158 		bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;
2159 
2160 		rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
2161 	}
2162 
2163 	/* If the indices differ then respect the insertion order. */
2164 	if (lhs_sort_idx != rhs_sort_idx)
2165 		return lhs_sort_idx - rhs_sort_idx;
2166 
2167 	/*
2168 	 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should
2169 	 * be in the same group. Events in the same group need to be ordered by
2170 	 * their grouping PMU name as the group will be broken to ensure only
2171 	 * events on the same PMU are programmed together.
2172 	 *
2173 	 * With forcing, lhs_sort_idx == rhs_sort_idx shows that one or both
2174 	 * events are being forced to sort at force_grouped_idx. If only one event
2175 	 * is being forced then the other event is the group leader of the group
2176 	 * we're trying to force the event into. Ensure for the force grouped
2177 	 * case that the PMU name ordering is also respected.
2178 	 */
2179 	lhs_pmu_name = lhs->group_pmu_name;
2180 	rhs_pmu_name = rhs->group_pmu_name;
2181 	ret = strcmp(lhs_pmu_name, rhs_pmu_name);
2182 	if (ret)
2183 		return ret;
2184 
2185 	/*
2186 	 * Architecture-specific sorting: by default sort events in the same
2187 	 * group with the same PMU by their insertion index. On Intel, topdown
2188 	 * constraints must be adhered to - slots first, etc.
2189 	 */
2190 	return arch_evlist__cmp(lhs, rhs);
2191 }
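
/*
 * The effective sort key is thus (sort_idx, group_pmu_name, arch order), so
 * groups stay in insertion order while members of a group are clustered by
 * PMU for the regrouping pass below.
 */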
2192 
2193 static int parse_events__sort_events_and_fix_groups(struct list_head *list)
2194 {
2195 	int idx = 0, force_grouped_idx = -1;
2196 	struct evsel *pos, *cur_leader = NULL;
2197 	struct perf_evsel *cur_leaders_grp = NULL;
2198 	bool idx_changed = false;
2199 	int orig_num_leaders = 0, num_leaders = 0;
2200 	int ret;
2201 	struct evsel *force_grouped_leader = NULL;
2202 	bool last_event_was_forced_leader = false;
2203 
2204 	/*
2205 	 * First pass: compute group PMU names, make indexes sequential and
2206 	 * record the index that force-grouped events are sorted to.
2207 	 */
2208 	list_for_each_entry(pos, list, core.node) {
2209 		const struct evsel *pos_leader = evsel__leader(pos);
2210 
2211 		ret = evsel__compute_group_pmu_name(pos, list);
2212 		if (ret)
2213 			return ret;
2214 
2215 		if (pos == pos_leader)
2216 			orig_num_leaders++;
2217 
2218 		/*
2219 		 * Ensure indexes are sequential, in particular for multiple
2220 		 * event lists being merged. The indexes are used to detect when
2221 		 * the user order is modified.
2222 		 */
2223 		pos->core.idx = idx++;
2224 
2225 		/*
2226 		 * Remember an index at which to sort all force-grouped
2227 		 * events together. Use the group leader's index as some
2228 		 * events must appear first within the group.
2229 		 */
2230 		if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
2231 			force_grouped_idx = pos_leader->core.idx;
2232 	}
2233 
2234 	/* Sort events. */
2235 	list_sort(&force_grouped_idx, list, evlist__cmp);
2236 
2237 	/*
2238 	 * Recompute groups, splitting for PMUs and adding groups for events
2239 	 * that require them.
2240 	 */
2241 	idx = 0;
2242 	list_for_each_entry(pos, list, core.node) {
2243 		const struct evsel *pos_leader = evsel__leader(pos);
2244 		const char *pos_pmu_name = pos->group_pmu_name;
2245 		const char *cur_leader_pmu_name;
2246 		bool pos_force_grouped = force_grouped_idx != -1 &&
2247 			arch_evsel__must_be_in_group(pos);
2248 
2249 		/* Reset index and nr_members. */
2250 		if (pos->core.idx != idx)
2251 			idx_changed = true;
2252 		pos->core.idx = idx++;
2253 		pos->core.nr_members = 0;
2254 
2255 		/*
2256 		 * Set the group leader respecting the given groupings and that
2257 		 * groups can't span PMUs.
2258 		 */
2259 		if (!cur_leader) {
2260 			cur_leader = pos;
2261 			cur_leaders_grp = &pos->core;
2262 			if (pos_force_grouped)
2263 				force_grouped_leader = pos;
2264 		}
2265 
2266 		cur_leader_pmu_name = cur_leader->group_pmu_name;
2267 		if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
2268 			/* PMU changed so the group/leader must change. */
2269 			cur_leader = pos;
2270 			cur_leaders_grp = pos->core.leader;
2271 			if (pos_force_grouped && force_grouped_leader == NULL)
2272 				force_grouped_leader = pos;
2273 		} else if (cur_leaders_grp != pos->core.leader) {
2274 			bool split_even_if_last_leader_was_forced = true;
2275 
2276 			/*
2277 			 * Event is for a different group. If the last event was
2278 			 * the forced group leader then subsequent group events
2279 			 * and forced events should be in the same group. If
2280 			 * there are no other forced group events then the
2281 			 * forced group leader wasn't really being forced into a
2282 			 * group; it merely matched arch_evsel__must_be_in_group(), and
2283 			 * we don't want the group to split here.
2284 			 */
2285 			if (force_grouped_idx != -1 && last_event_was_forced_leader) {
2286 				struct evsel *pos2 = pos;
2287 				/*
2288 				 * Search the whole list as the group leaders
2289 				 * aren't currently valid.
2290 				 */
2291 				list_for_each_entry_continue(pos2, list, core.node) {
2292 					if (pos->core.leader == pos2->core.leader &&
2293 					    arch_evsel__must_be_in_group(pos2)) {
2294 						split_even_if_last_leader_was_forced = false;
2295 						break;
2296 					}
2297 				}
2298 			}
2299 			if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
2300 				if (pos_force_grouped) {
2301 					if (force_grouped_leader) {
2302 						cur_leader = force_grouped_leader;
2303 						cur_leaders_grp = force_grouped_leader->core.leader;
2304 					} else {
2305 						cur_leader = force_grouped_leader = pos;
2306 						cur_leaders_grp = &pos->core;
2307 					}
2308 				} else {
2309 					cur_leader = pos;
2310 					cur_leaders_grp = pos->core.leader;
2311 				}
2312 			}
2313 		}
2314 		if (pos_leader != cur_leader) {
2315 			/* The leader changed so update it. */
2316 			evsel__set_leader(pos, cur_leader);
2317 		}
2318 		last_event_was_forced_leader = (force_grouped_leader == pos);
2319 	}
2320 	list_for_each_entry(pos, list, core.node) {
2321 		struct evsel *pos_leader = evsel__leader(pos);
2322 
2323 		if (pos == pos_leader)
2324 			num_leaders++;
2325 		pos_leader->core.nr_members++;
2326 	}
2327 	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
2328 }
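
/*
 * For example (illustrative), "{cycles,uncore_imc/cas_count_read/}" is split
 * into per-PMU groups here, and the non-zero return makes __parse_events()
 * print the "events were regrouped" warning below.
 */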
2329 
2330 int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
2331 		   struct parse_events_error *err, bool fake_pmu,
2332 		   bool warn_if_reordered, bool fake_tp)
2333 {
2334 	struct parse_events_state parse_state = {
2335 		.list	  = LIST_HEAD_INIT(parse_state.list),
2336 		.idx	  = evlist->core.nr_entries,
2337 		.error	  = err,
2338 		.stoken	  = PE_START_EVENTS,
2339 		.fake_pmu = fake_pmu,
2340 		.fake_tp  = fake_tp,
2341 		.pmu_filter = pmu_filter,
2342 		.match_legacy_cache_terms = true,
2343 	};
2344 	int ret, ret2;
2345 
2346 	ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);
2347 
2348 	if (!ret && list_empty(&parse_state.list)) {
2349 		WARN_ONCE(true, "WARNING: event parser found nothing\n");
2350 		return -1;
2351 	}
2352 
2353 	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
2354 	if (ret2 < 0)
2355 		return ret ?: ret2;
2356 
2357 	/*
2358 	 * Add list to the evlist even with errors to allow callers to clean up.
2359 	 */
2360 	evlist__splice_list_tail(evlist, &parse_state.list);
2361 
2362 	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
2363 		pr_warning("WARNING: events were regrouped to match PMUs\n");
2364 
2365 		if (verbose > 0) {
2366 			struct strbuf sb = STRBUF_INIT;
2367 
2368 			evlist__uniquify_evsel_names(evlist, &stat_config);
2369 			evlist__format_evsels(evlist, &sb, 2048);
2370 			pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
2371 			strbuf_release(&sb);
2372 		}
2373 	}
2374 	if (!ret) {
2375 		struct evsel *last;
2376 
2377 		last = evlist__last(evlist);
2378 		last->cmdline_group_boundary = true;
2379 
2380 		return 0;
2381 	}
2382 
2383 	/*
2384 	 * There are two users - builtin-record and builtin-test objects.
2385 	 * Both call evlist__delete in case of error, so we don't
2386 	 * need to bother.
2387 	 */
2388 	return ret;
2389 }
2390 
2391 int parse_event(struct evlist *evlist, const char *str)
2392 {
2393 	struct parse_events_error err;
2394 	int ret;
2395 
2396 	parse_events_error__init(&err);
2397 	ret = parse_events(evlist, str, &err);
2398 	parse_events_error__exit(&err);
2399 	return ret;
2400 }
2401 
2402 struct parse_events_error_entry {
2403 	/** @list: The list the error is part of. */
2404 	struct list_head list;
2405 	/** @idx: index in the parsed string */
2406 	int   idx;
2407 	/** @str: string to display at the index */
2408 	char *str;
2409 	/** @help: optional help string */
2410 	char *help;
2411 };
2412 
2413 void parse_events_error__init(struct parse_events_error *err)
2414 {
2415 	INIT_LIST_HEAD(&err->list);
2416 }
2417 
2418 void parse_events_error__exit(struct parse_events_error *err)
2419 {
2420 	struct parse_events_error_entry *pos, *tmp;
2421 
2422 	list_for_each_entry_safe(pos, tmp, &err->list, list) {
2423 		zfree(&pos->str);
2424 		zfree(&pos->help);
2425 		list_del_init(&pos->list);
2426 		free(pos);
2427 	}
2428 }
2429 
2430 void parse_events_error__handle(struct parse_events_error *err, int idx,
2431 				char *str, char *help)
2432 {
2433 	struct parse_events_error_entry *entry;
2434 
2435 	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
2436 		goto out_free;
2437 
2438 	entry = zalloc(sizeof(*entry));
2439 	if (!entry) {
2440 		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
2441 			str, help ?: "<no help>");
2442 		goto out_free;
2443 	}
2444 	entry->idx = idx;
2445 	entry->str = str;
2446 	entry->help = help;
2447 	list_add(&entry->list, &err->list);
2448 	return;
2449 out_free:
2450 	free(str);
2451 	free(help);
2452 }
2453 
2454 #define MAX_WIDTH 1000
2455 static int get_term_width(void)
2456 {
2457 	struct winsize ws;
2458 
2459 	get_term_dimensions(&ws);
2460 	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
2461 }
2462 
2463 static void __parse_events_error__print(int err_idx, const char *err_str,
2464 					const char *err_help, const char *event)
2465 {
2466 	const char *str = "invalid or unsupported event: ";
2467 	char _buf[MAX_WIDTH];
2468 	char *buf = (char *) event;
2469 	int idx = 0;
2470 	if (err_str) {
2471 		/* -2 for the quotes around the event in the final fprintf */
2472 		int width       = get_term_width() - 2;
2473 		int len_event   = strlen(event);
2474 		int len_str, max_len, cut = 0;
2475 
2476 		/*
2477 		 * Maximum error index indent; we will cut
2478 		 * the event string if it's bigger.
2479 		 */
2480 		int max_err_idx = 13;
2481 
2482 		/*
2483 		 * Let's be specific with the message when
2484 		 * we have the precise error.
2485 		 */
2486 		str     = "event syntax error: ";
2487 		len_str = strlen(str);
2488 		max_len = width - len_str;
2489 
2490 		buf = _buf;
2491 
2492 		/* We're cutting from the beginning. */
2493 		if (err_idx > max_err_idx)
2494 			cut = err_idx - max_err_idx;
2495 
2496 		strncpy(buf, event + cut, max_len);
2497 
2498 		/* Mark cut parts with '..' on both sides. */
2499 		if (cut)
2500 			buf[0] = buf[1] = '.';
2501 
2502 		if ((len_event - cut) >= max_len) {
2503 			buf[max_len - 1] = buf[max_len - 2] = '.';
2504 			buf[max_len] = 0;
2505 		}
2506 
2507 		idx = len_str + err_idx - cut;
2508 	}
2509 
2510 	fprintf(stderr, "%s'%s'\n", str, buf);
2511 	if (idx) {
2512 		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
2513 		if (err_help)
2514 			fprintf(stderr, "\n%s\n", err_help);
2515 	}
2516 }
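
/*
 * Example output (illustrative values; the exact column depends on idx):
 *
 *	event syntax error: 'cycles/periodd=1000/'
 *	                           \___ unknown term 'periodd'
 */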
2517 
2518 void parse_events_error__print(const struct parse_events_error *err,
2519 			       const char *event)
2520 {
2521 	struct parse_events_error_entry *pos;
2522 	bool first = true;
2523 
2524 	list_for_each_entry(pos, &err->list, list) {
2525 		if (!first)
2526 			fputs("\n", stderr);
2527 		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
2528 		first = false;
2529 	}
2530 }
2531 
2532 /*
2533  * In the list of errors err, do any of the error strings (str) contain the
2534  * given needle string?
2535  */
2536 bool parse_events_error__contains(const struct parse_events_error *err,
2537 				  const char *needle)
2538 {
2539 	struct parse_events_error_entry *pos;
2540 
2541 	list_for_each_entry(pos, &err->list, list) {
2542 		if (strstr(pos->str, needle) != NULL)
2543 			return true;
2544 	}
2545 	return false;
2546 }
2547 
2548 #undef MAX_WIDTH
2549 
2550 int parse_events_option(const struct option *opt, const char *str,
2551 			int unset __maybe_unused)
2552 {
2553 	struct parse_events_option_args *args = opt->value;
2554 	struct parse_events_error err;
2555 	int ret;
2556 
2557 	parse_events_error__init(&err);
2558 	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
2559 			     /*fake_pmu=*/false, /*warn_if_reordered=*/true,
2560 			     /*fake_tp=*/false);
2561 
2562 	if (ret) {
2563 		parse_events_error__print(&err, str);
2564 		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
2565 	}
2566 	parse_events_error__exit(&err);
2567 
2568 	return ret;
2569 }
2570 
2571 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
2572 {
2573 	struct parse_events_option_args *args = opt->value;
2574 	int ret;
2575 
2576 	if (*args->evlistp == NULL) {
2577 		*args->evlistp = evlist__new();
2578 
2579 		if (*args->evlistp == NULL) {
2580 			fprintf(stderr, "Not enough memory to create evlist\n");
2581 			return -1;
2582 		}
2583 	}
2584 	ret = parse_events_option(opt, str, unset);
2585 	if (ret) {
2586 		evlist__delete(*args->evlistp);
2587 		*args->evlistp = NULL;
2588 	}
2589 
2590 	return ret;
2591 }
2592 
2593 static int
2594 foreach_evsel_in_last_glob(struct evlist *evlist,
2595 			   int (*func)(struct evsel *evsel,
2596 				       const void *arg),
2597 			   const void *arg)
2598 {
2599 	struct evsel *last = NULL;
2600 	int err;
2601 
2602 	/*
2603 	 * Don't return when the list is empty; give func a chance to
2604 	 * report an error when it finds last == NULL.
2605 	 *
2606 	 * So there is no need to WARN here; let *func do this.
2607 	 */
2608 	if (evlist->core.nr_entries > 0)
2609 		last = evlist__last(evlist);
2610 
2611 	do {
2612 		err = (*func)(last, arg);
2613 		if (err)
2614 			return -1;
2615 		if (!last)
2616 			return 0;
2617 
2618 		if (last->core.node.prev == &evlist->core.entries)
2619 			return 0;
2620 		last = list_entry(last->core.node.prev, struct evsel, core.node);
2621 	} while (!last->cmdline_group_boundary);
2622 
2623 	return 0;
2624 }
2625 
2626 /* Will a tracepoint filter work for str or should a BPF filter be used? */
2627 static bool is_possible_tp_filter(const char *str)
2628 {
2629 	return strstr(str, "uid") == NULL;
2630 }
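
/*
 * For example, a filter of "common_pid != 42" can be handled by the kernel's
 * tracepoint filter, but "uid == 1000" cannot, so set_filter() below routes
 * it to a BPF filter instead.
 */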
2631 
2632 static int set_filter(struct evsel *evsel, const void *arg)
2633 {
2634 	const char *str = arg;
2635 	int nr_addr_filters = 0;
2636 	struct perf_pmu *pmu;
2637 
2638 	if (evsel == NULL) {
2639 		fprintf(stderr,
2640 			"--filter option should follow a -e tracepoint or HW tracer option\n");
2641 		return -1;
2642 	}
2643 
2644 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) {
2645 		if (evsel__append_tp_filter(evsel, str) < 0) {
2646 			fprintf(stderr,
2647 				"not enough memory to hold filter string\n");
2648 			return -1;
2649 		}
2650 
2651 		return 0;
2652 	}
2653 
2654 	pmu = evsel__find_pmu(evsel);
2655 	if (pmu) {
2656 		perf_pmu__scan_file(pmu, "nr_addr_filters",
2657 				    "%d", &nr_addr_filters);
2658 	}
2659 	if (!nr_addr_filters)
2660 		return perf_bpf_filter__parse(&evsel->bpf_filters, str);
2661 
2662 	if (evsel__append_addr_filter(evsel, str) < 0) {
2663 		fprintf(stderr,
2664 			"not enough memory to hold filter string\n");
2665 		return -1;
2666 	}
2667 
2668 	return 0;
2669 }
2670 
2671 int parse_filter(const struct option *opt, const char *str,
2672 		 int unset __maybe_unused)
2673 {
2674 	struct evlist *evlist = *(struct evlist **)opt->value;
2675 
2676 	return foreach_evsel_in_last_glob(evlist, set_filter,
2677 					  (const void *)str);
2678 }
2679 
2680 int parse_uid_filter(struct evlist *evlist, uid_t uid)
2681 {
2682 	struct option opt = {
2683 		.value = &evlist,
2684 	};
2685 	char buf[128];
2686 	int ret;
2687 
2688 	snprintf(buf, sizeof(buf), "uid == %u", uid);
2689 	ret = parse_filter(&opt, buf, /*unset=*/0);
2690 	if (ret) {
2691 		if (use_browser >= 1) {
2692 			/*
2693 			 * Use ui__warning so a pop up appears above the
2694 			 * underlying BPF error message.
2695 			 */
2696 			ui__warning("Failed to add UID filtering that uses BPF filtering.\n");
2697 		} else {
2698 			fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n");
2699 		}
2700 	}
2701 	return ret;
2702 }
2703 
2704 static int add_exclude_perf_filter(struct evsel *evsel,
2705 				   const void *arg __maybe_unused)
2706 {
2707 	char new_filter[64];
2708 
2709 	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2710 		fprintf(stderr,
2711 			"--exclude-perf option should follow a -e tracepoint option\n");
2712 		return -1;
2713 	}
2714 
2715 	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
2716 
2717 	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
2718 		fprintf(stderr,
2719 			"not enough memory to hold filter string\n");
2720 		return -1;
2721 	}
2722 
2723 	return 0;
2724 }
2725 
2726 int exclude_perf(const struct option *opt,
2727 		 const char *arg __maybe_unused,
2728 		 int unset __maybe_unused)
2729 {
2730 	struct evlist *evlist = *(struct evlist **)opt->value;
2731 
2732 	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
2733 					  NULL);
2734 }
2735 
2736 int parse_events__is_hardcoded_term(struct parse_events_term *term)
2737 {
2738 	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
2739 }
2740 
2741 static int new_term(struct parse_events_term **_term,
2742 		    struct parse_events_term *temp,
2743 		    char *str, u64 num)
2744 {
2745 	struct parse_events_term *term;
2746 
2747 	term = malloc(sizeof(*term));
2748 	if (!term)
2749 		return -ENOMEM;
2750 
2751 	*term = *temp;
2752 	INIT_LIST_HEAD(&term->list);
2753 	term->weak = false;
2754 
2755 	switch (term->type_val) {
2756 	case PARSE_EVENTS__TERM_TYPE_NUM:
2757 		term->val.num = num;
2758 		break;
2759 	case PARSE_EVENTS__TERM_TYPE_STR:
2760 		term->val.str = str;
2761 		break;
2762 	default:
2763 		free(term);
2764 		return -EINVAL;
2765 	}
2766 
2767 	*_term = term;
2768 	return 0;
2769 }
2770 
2771 int parse_events_term__num(struct parse_events_term **term,
2772 			   enum parse_events__term_type type_term,
2773 			   const char *config, u64 num,
2774 			   bool no_value,
2775 			   void *loc_term_, void *loc_val_)
2776 {
2777 	YYLTYPE *loc_term = loc_term_;
2778 	YYLTYPE *loc_val = loc_val_;
2779 
2780 	struct parse_events_term temp = {
2781 		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
2782 		.type_term = type_term,
2783 		.config    = config ? : strdup(parse_events__term_type_str(type_term)),
2784 		.no_value  = no_value,
2785 		.err_term  = loc_term ? loc_term->first_column : 0,
2786 		.err_val   = loc_val  ? loc_val->first_column  : 0,
2787 	};
2788 
2789 	return new_term(term, &temp, /*str=*/NULL, num);
2790 }
2791 
2792 int parse_events_term__str(struct parse_events_term **term,
2793 			   enum parse_events__term_type type_term,
2794 			   char *config, char *str,
2795 			   void *loc_term_, void *loc_val_)
2796 {
2797 	YYLTYPE *loc_term = loc_term_;
2798 	YYLTYPE *loc_val = loc_val_;
2799 
2800 	struct parse_events_term temp = {
2801 		.type_val  = PARSE_EVENTS__TERM_TYPE_STR,
2802 		.type_term = type_term,
2803 		.config    = config,
2804 		.err_term  = loc_term ? loc_term->first_column : 0,
2805 		.err_val   = loc_val  ? loc_val->first_column  : 0,
2806 	};
2807 
2808 	return new_term(term, &temp, str, /*num=*/0);
2809 }
2810 
2811 int parse_events_term__term(struct parse_events_term **term,
2812 			    enum parse_events__term_type term_lhs,
2813 			    enum parse_events__term_type term_rhs,
2814 			    void *loc_term, void *loc_val)
2815 {
2816 	return parse_events_term__str(term, term_lhs, NULL,
2817 				      strdup(parse_events__term_type_str(term_rhs)),
2818 				      loc_term, loc_val);
2819 }
2820 
2821 int parse_events_term__clone(struct parse_events_term **new,
2822 			     const struct parse_events_term *term)
2823 {
2824 	char *str;
2825 	struct parse_events_term temp = *term;
2826 
2827 	temp.used = false;
2828 	if (term->config) {
2829 		temp.config = strdup(term->config);
2830 		if (!temp.config)
2831 			return -ENOMEM;
2832 	}
2833 	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2834 		return new_term(new, &temp, /*str=*/NULL, term->val.num);
2835 
2836 	str = strdup(term->val.str);
2837 	if (!str) {
2838 		zfree(&temp.config);
2839 		return -ENOMEM;
2840 	}
2841 	return new_term(new, &temp, str, /*num=*/0);
2842 }
2843 
2844 void parse_events_term__delete(struct parse_events_term *term)
2845 {
2846 	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
2847 		zfree(&term->val.str);
2848 
2849 	zfree(&term->config);
2850 	free(term);
2851 }
2852 
2853 static int parse_events_terms__copy(const struct parse_events_terms *src,
2854 				    struct parse_events_terms *dest)
2855 {
2856 	struct parse_events_term *term;
2857 
2858 	list_for_each_entry(term, &src->terms, list) {
2859 		struct parse_events_term *n;
2860 		int ret;
2861 
2862 		ret = parse_events_term__clone(&n, term);
2863 		if (ret)
2864 			return ret;
2865 
2866 		list_add_tail(&n->list, &dest->terms);
2867 	}
2868 	return 0;
2869 }
2870 
2871 void parse_events_terms__init(struct parse_events_terms *terms)
2872 {
2873 	INIT_LIST_HEAD(&terms->terms);
2874 }
2875 
2876 void parse_events_terms__exit(struct parse_events_terms *terms)
2877 {
2878 	struct parse_events_term *term, *h;
2879 
2880 	list_for_each_entry_safe(term, h, &terms->terms, list) {
2881 		list_del_init(&term->list);
2882 		parse_events_term__delete(term);
2883 	}
2884 }
2885 
2886 void parse_events_terms__delete(struct parse_events_terms *terms)
2887 {
2888 	if (!terms)
2889 		return;
2890 	parse_events_terms__exit(terms);
2891 	free(terms);
2892 }
2893 
2894 int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
2895 {
2896 	struct parse_events_term *term;
2897 	bool first = true;
2898 
2899 	if (!terms)
2900 		return 0;
2901 
2902 	list_for_each_entry(term, &terms->terms, list) {
2903 		int ret = 0;
2904 
2905 		if (!first) {
2906 			ret = strbuf_addch(sb, ',');
2907 			if (ret < 0)
2908 				return ret;
2909 		}
2910 		first = false;
2911 
2912 		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
2913 			if (term->no_value) {
2914 				assert(term->val.num == 1);
2915 				ret = strbuf_addf(sb, "%s", term->config);
2916 			} else
2917 				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
2918 		} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
2919 			if (term->config) {
2920 				ret = strbuf_addf(sb, "%s=", term->config);
2921 				if (ret < 0)
2922 					return ret;
2923 			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
2924 				ret = strbuf_addf(sb, "%s=",
2925 						  parse_events__term_type_str(term->type_term));
2926 				if (ret < 0)
2927 					return ret;
2928 			}
2929 			assert(!term->no_value);
2930 			ret = strbuf_addf(sb, "%s", term->val.str);
2931 		}
2932 		if (ret < 0)
2933 			return ret;
2934 	}
2935 	return 0;
2936 }
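
/*
 * For example (illustrative), terms parsed from "period=0x1000,name=foo"
 * render back as "period=0x1000,name=foo", while a no-value term such as a
 * bare event name renders as just its config string.
 */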
2937 
2938 static void config_terms_list(char *buf, size_t buf_sz)
2939 {
2940 	int i;
2941 	bool first = true;
2942 
2943 	buf[0] = '\0';
2944 	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
2945 		const char *name = parse_events__term_type_str(i);
2946 
2947 		if (!config_term_avail(i, NULL))
2948 			continue;
2949 		if (!name)
2950 			continue;
2951 		if (name[0] == '<')
2952 			continue;
2953 
2954 		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
2955 			return;
2956 
2957 		if (!first)
2958 			strcat(buf, ",");
2959 		else
2960 			first = false;
2961 		strcat(buf, name);
2962 	}
2963 }
2964 
2965 /*
2966  * Return a string containing the valid config terms of an event.
2967  * @additional_terms: For terms such as PMU sysfs terms.
2968  */
2969 char *parse_events_formats_error_string(char *additional_terms)
2970 {
2971 	char *str;
2972 	/* "no-overwrite" is the longest name */
2973 	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
2974 			  (sizeof("no-overwrite") - 1)];
2975 
2976 	config_terms_list(static_terms, sizeof(static_terms));
2977 	/* valid terms */
2978 	if (additional_terms) {
2979 		if (asprintf(&str, "valid terms: %s,%s",
2980 			     additional_terms, static_terms) < 0)
2981 			goto fail;
2982 	} else {
2983 		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
2984 			goto fail;
2985 	}
2986 	return str;
2987 
2988 fail:
2989 	return NULL;
2990 }
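
/*
 * For example (illustrative), with additional_terms of "event,umask" the
 * returned string reads "valid terms: event,umask,config,config1,...".
 */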
2991