xref: /linux/tools/perf/util/parse-events.c (revision a85ac2dae6bf8050deaf9839e4c0328756b48720)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/hw_breakpoint.h>
3 #include <linux/err.h>
4 #include <linux/list_sort.h>
5 #include <linux/zalloc.h>
6 #include <dirent.h>
7 #include <errno.h>
8 #include <sys/ioctl.h>
9 #include <sys/param.h>
10 #include "cpumap.h"
11 #include "term.h"
12 #include "env.h"
13 #include "evlist.h"
14 #include "evsel.h"
15 #include <subcmd/parse-options.h>
16 #include "parse-events.h"
17 #include "string2.h"
18 #include "strbuf.h"
19 #include "debug.h"
20 #include <perf/cpumap.h>
21 #include <util/parse-events-bison.h>
22 #include <util/parse-events-flex.h>
23 #include "pmu.h"
24 #include "pmus.h"
25 #include "tp_pmu.h"
26 #include "asm/bug.h"
27 #include "ui/ui.h"
28 #include "util/parse-branch-options.h"
29 #include "util/evsel_config.h"
30 #include "util/event.h"
31 #include "util/bpf-filter.h"
32 #include "util/stat.h"
33 #include "util/util.h"
34 #include "tracepoint.h"
35 #include <api/fs/tracing_path.h>
36 
37 #define MAX_NAME_LEN 100
38 
39 static int get_config_terms(const struct parse_events_terms *head_config,
40 			    struct list_head *head_terms);
41 static int parse_events_terms__copy(const struct parse_events_terms *src,
42 				    struct parse_events_terms *dest);
43 
44 const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
45 	[PERF_COUNT_HW_CPU_CYCLES] = {
46 		.symbol = "cpu-cycles",
47 		.alias  = "cycles",
48 	},
49 	[PERF_COUNT_HW_INSTRUCTIONS] = {
50 		.symbol = "instructions",
51 		.alias  = "",
52 	},
53 	[PERF_COUNT_HW_CACHE_REFERENCES] = {
54 		.symbol = "cache-references",
55 		.alias  = "",
56 	},
57 	[PERF_COUNT_HW_CACHE_MISSES] = {
58 		.symbol = "cache-misses",
59 		.alias  = "",
60 	},
61 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
62 		.symbol = "branch-instructions",
63 		.alias  = "branches",
64 	},
65 	[PERF_COUNT_HW_BRANCH_MISSES] = {
66 		.symbol = "branch-misses",
67 		.alias  = "",
68 	},
69 	[PERF_COUNT_HW_BUS_CYCLES] = {
70 		.symbol = "bus-cycles",
71 		.alias  = "",
72 	},
73 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
74 		.symbol = "stalled-cycles-frontend",
75 		.alias  = "idle-cycles-frontend",
76 	},
77 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
78 		.symbol = "stalled-cycles-backend",
79 		.alias  = "idle-cycles-backend",
80 	},
81 	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
82 		.symbol = "ref-cycles",
83 		.alias  = "",
84 	},
85 };
86 
87 static const char *const event_types[] = {
88 	[PERF_TYPE_HARDWARE]	= "hardware",
89 	[PERF_TYPE_SOFTWARE]	= "software",
90 	[PERF_TYPE_TRACEPOINT]	= "tracepoint",
91 	[PERF_TYPE_HW_CACHE]	= "hardware-cache",
92 	[PERF_TYPE_RAW]		= "raw",
93 	[PERF_TYPE_BREAKPOINT]	= "breakpoint",
94 };
95 
96 const char *event_type(size_t type)
97 {
98 	if (type >= PERF_TYPE_MAX)
99 		return "unknown";
100 
101 	return event_types[type];
102 }
103 
104 static char *get_config_str(const struct parse_events_terms *head_terms,
105 			    enum parse_events__term_type type_term)
106 {
107 	struct parse_events_term *term;
108 
109 	if (!head_terms)
110 		return NULL;
111 
112 	list_for_each_entry(term, &head_terms->terms, list)
113 		if (term->type_term == type_term)
114 			return term->val.str;
115 
116 	return NULL;
117 }
118 
119 static char *get_config_metric_id(const struct parse_events_terms *head_terms)
120 {
121 	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
122 }
123 
124 static char *get_config_name(const struct parse_events_terms *head_terms)
125 {
126 	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
127 }
128 
129 static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms,
130 					   bool fake_pmu)
131 {
132 	struct parse_events_term *term;
133 	struct perf_cpu_map *cpus = NULL;
134 
135 	if (!head_terms)
136 		return NULL;
137 
138 	list_for_each_entry(term, &head_terms->terms, list) {
139 		struct perf_cpu_map *term_cpus;
140 
141 		if (term->type_term != PARSE_EVENTS__TERM_TYPE_CPU)
142 			continue;
143 
144 		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
145 			term_cpus = perf_cpu_map__new_int(term->val.num);
146 		} else {
147 			struct perf_pmu *pmu = perf_pmus__find(term->val.str);
148 
149 			if (pmu) {
150 				term_cpus = pmu->is_core && perf_cpu_map__is_empty(pmu->cpus)
151 					    ? cpu_map__online()
152 					    : perf_cpu_map__get(pmu->cpus);
153 			} else {
154 				term_cpus = perf_cpu_map__new(term->val.str);
155 				if (!term_cpus && fake_pmu) {
156 					/*
157 					 * Assume the PMU string makes sense on a different
158 					 * machine and fake a value with all online CPUs.
159 					 */
160 					term_cpus = cpu_map__online();
161 				}
162 			}
163 		}
164 		perf_cpu_map__merge(&cpus, term_cpus);
165 		perf_cpu_map__put(term_cpus);
166 	}
167 
168 	return cpus;
169 }
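
/*
 * Example (illustrative sketch): for an event such as
 * "instructions/cpu=0/", get_config_cpu() would return a map holding
 * only CPU 0, while "instructions/cpu=cpu_atom/" (assuming a hybrid
 * system with a cpu_atom PMU) resolves the string to that PMU's CPUs.
 * Multiple cpu= terms are merged into a single map:
 *
 *	struct perf_cpu_map *cpus = get_config_cpu(terms, false);
 *
 *	if (cpus)
 *		pr_debug("event restricted to %d CPUs\n",
 *			 perf_cpu_map__nr(cpus));
 *	perf_cpu_map__put(cpus);
 */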
170 
171 /**
172  * fix_raw - For each raw term, see if there is an event (aka alias) in pmu that
173  *           matches the raw's string value. If the string value matches an
174  *           event then change the term to be an event; if not, change it to
175  *           be a config term. For example, "read" may be an event of the PMU or
176  *           a raw hex encoding of 0xead. The fix-up is done late so the PMU of
177  *           the event can be determined and we don't need to scan all PMUs
178  *           ahead-of-time.
179  * @config_terms: the list of terms that may contain a raw term.
180  * @pmu: the PMU to scan for events from.
181  */
182 static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
183 {
184 	struct parse_events_term *term;
185 
186 	list_for_each_entry(term, &config_terms->terms, list) {
187 		u64 num;
188 
189 		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
190 			continue;
191 
192 		if (perf_pmu__have_event(pmu, term->val.str)) {
193 			zfree(&term->config);
194 			term->config = term->val.str;
195 			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
196 			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
197 			term->val.num = 1;
198 			term->no_value = true;
199 			continue;
200 		}
201 
202 		zfree(&term->config);
203 		term->config = strdup("config");
204 		errno = 0;
205 		num = strtoull(term->val.str + 1, NULL, 16);
206 		assert(errno == 0);
207 		free(term->val.str);
208 		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
209 		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
210 		term->val.num = num;
211 		term->no_value = false;
212 	}
213 }
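
/*
 * Example (illustrative sketch): given the raw term string "read",
 * fix_raw() first asks the PMU whether it has an event of that name.
 * If it does, the term becomes the user term "read=1" and is resolved
 * later like any other sysfs/json event. If it does not, the leading
 * 'r' is skipped and the remainder is parsed as hex, so the term is
 * rewritten to the equivalent of "config=0xead".
 */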
214 
215 static struct evsel *
216 __add_event(struct list_head *list, int *idx,
217 	    struct perf_event_attr *attr,
218 	    bool init_attr,
219 	    const char *name, const char *metric_id, struct perf_pmu *pmu,
220 	    struct list_head *config_terms, struct evsel *first_wildcard_match,
221 	    struct perf_cpu_map *user_cpus, u64 alternate_hw_config)
222 {
223 	struct evsel *evsel;
224 	bool is_pmu_core;
225 	struct perf_cpu_map *cpus, *pmu_cpus;
226 	bool has_user_cpus = !perf_cpu_map__is_empty(user_cpus);
227 
228 	/*
229 	 * Ensure the first_wildcard_match's PMU matches that of the new event
230 	 * being added. Otherwise try to match with another event further down
231 	 * the evlist.
232 	 */
233 	if (first_wildcard_match) {
234 		struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);
235 
236 		first_wildcard_match = NULL;
237 		list_for_each_entry_continue(pos, list, core.node) {
238 			if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
239 				first_wildcard_match = pos;
240 				break;
241 			}
242 			if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
243 				first_wildcard_match = pos;
244 				break;
245 			}
246 		}
247 	}
248 
249 	if (pmu) {
250 		perf_pmu__warn_invalid_formats(pmu);
251 		if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
252 			perf_pmu__warn_invalid_config(pmu, attr->config, name,
253 						PERF_PMU_FORMAT_VALUE_CONFIG, "config");
254 			perf_pmu__warn_invalid_config(pmu, attr->config1, name,
255 						PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
256 			perf_pmu__warn_invalid_config(pmu, attr->config2, name,
257 						PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
258 			perf_pmu__warn_invalid_config(pmu, attr->config3, name,
259 						PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
260 		}
261 	}
262 	/*
263 	 * If a PMU wasn't given, such as for legacy events, find one now so
264 	 * that the invalid config warnings above won't be generated for it.
265 	 */
266 	if (!pmu)
267 		pmu = perf_pmus__find_by_attr(attr);
268 
269 	if (pmu) {
270 		is_pmu_core = pmu->is_core;
271 		pmu_cpus = perf_cpu_map__get(pmu->cpus);
272 		if (perf_cpu_map__is_empty(pmu_cpus))
273 			pmu_cpus = cpu_map__online();
274 	} else {
275 		is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
276 			       attr->type == PERF_TYPE_HW_CACHE);
277 		pmu_cpus = is_pmu_core ? cpu_map__online() : NULL;
278 	}
279 
280 	if (has_user_cpus)
281 		cpus = perf_cpu_map__get(user_cpus);
282 	else
283 		cpus = perf_cpu_map__get(pmu_cpus);
284 
285 	if (init_attr)
286 		event_attr_init(attr);
287 
288 	evsel = evsel__new_idx(attr, *idx);
289 	if (!evsel)
290 		goto out_err;
291 
292 	if (name) {
293 		evsel->name = strdup(name);
294 		if (!evsel->name)
295 			goto out_err;
296 	}
297 
298 	if (metric_id) {
299 		evsel->metric_id = strdup(metric_id);
300 		if (!evsel->metric_id)
301 			goto out_err;
302 	}
303 
304 	(*idx)++;
305 	evsel->core.cpus = cpus;
306 	evsel->core.pmu_cpus = pmu_cpus;
307 	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
308 	evsel->core.is_pmu_core = is_pmu_core;
309 	evsel->pmu = pmu;
310 	evsel->alternate_hw_config = alternate_hw_config;
311 	evsel->first_wildcard_match = first_wildcard_match;
312 
313 	if (config_terms)
314 		list_splice_init(config_terms, &evsel->config_terms);
315 
316 	if (list)
317 		list_add_tail(&evsel->core.node, list);
318 
319 	if (has_user_cpus)
320 		evsel__warn_user_requested_cpus(evsel, user_cpus);
321 
322 	return evsel;
323 out_err:
324 	perf_cpu_map__put(cpus);
325 	perf_cpu_map__put(pmu_cpus);
326 	if (evsel) {
327 		zfree(&evsel->name);
328 		zfree(&evsel->metric_id);
329 		free(evsel);
330 	}
329 	return NULL;
330 }
331 
332 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
333 				      const char *name, const char *metric_id,
334 				      struct perf_pmu *pmu)
335 {
336 	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
337 			   metric_id, pmu, /*config_terms=*/NULL,
338 			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
339 			   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
340 }
341 
342 static int add_event(struct list_head *list, int *idx,
343 		     struct perf_event_attr *attr, const char *name,
344 		     const char *metric_id, struct list_head *config_terms,
345 		     u64 alternate_hw_config)
346 {
347 	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
348 			   /*pmu=*/NULL, config_terms,
349 			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
350 			   alternate_hw_config) ? 0 : -ENOMEM;
351 }
352 
353 /**
354  * parse_aliases - search names for entries beginning with or equal to str,
355  *                 ignoring case. If multiple entries in names match str then
356  *                 the longest is chosen.
357  * @str: The needle to look for.
358  * @names: The haystack to search.
359  * @size: The size of the haystack.
360  * @longest: Out argument giving the length of the matching entry.
361  */
362 static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
363 			 int *longest)
364 {
365 	*longest = -1;
366 	for (int i = 0; i < size; i++) {
367 		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
368 			int n = strlen(names[i][j]);
369 
370 			if (n > *longest && !strncasecmp(str, names[i][j], n))
371 				*longest = n;
372 		}
373 		if (*longest > 0)
374 			return i;
375 	}
376 
377 	return -1;
378 }
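
/*
 * Example (illustrative sketch): assuming the usual alias spellings in
 * evsel__hw_cache, parse_aliases("L1-dcache-load-misses",
 * evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len) returns
 * PERF_COUNT_HW_CACHE_L1D with len == 9 (strlen("L1-dcache")), so the
 * caller continues parsing at str + len + 1, just past the next hyphen.
 */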
379 
380 typedef int config_term_func_t(struct perf_event_attr *attr,
381 			       struct parse_events_term *term,
382 			       struct parse_events_state *parse_state);
383 static int config_term_common(struct perf_event_attr *attr,
384 			      struct parse_events_term *term,
385 			      struct parse_events_state *parse_state);
386 static int config_attr(struct perf_event_attr *attr,
387 		       const struct parse_events_terms *head,
388 		       struct parse_events_state *parse_state,
389 		       config_term_func_t config_term);
390 
391 /**
392  * parse_events__decode_legacy_cache - Search name for the legacy cache event
393  *                                     name composed of 1, 2 or 3 hyphen
394  *                                     separated sections. The first section is
395  *                                     the cache type while the others are the
396  *                                     optional op and optional result. To make
397  *                                     life hard, the names in the table also
398  *                                     contain hyphens and the longest name
399  *                                     should always be selected.
400  */
401 int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
402 {
403 	int len, cache_type = -1, cache_op = -1, cache_result = -1;
404 	const char *name_end = &name[strlen(name) + 1];
405 	const char *str = name;
406 
407 	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
408 	if (cache_type == -1)
409 		return -EINVAL;
410 	str += len + 1;
411 
412 	if (str < name_end) {
413 		cache_op = parse_aliases(str, evsel__hw_cache_op,
414 					PERF_COUNT_HW_CACHE_OP_MAX, &len);
415 		if (cache_op >= 0) {
416 			if (!evsel__is_cache_op_valid(cache_type, cache_op))
417 				return -EINVAL;
418 			str += len + 1;
419 		} else {
420 			cache_result = parse_aliases(str, evsel__hw_cache_result,
421 						PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
422 			if (cache_result >= 0)
423 				str += len + 1;
424 		}
425 	}
426 	if (str < name_end) {
427 		if (cache_op < 0) {
428 			cache_op = parse_aliases(str, evsel__hw_cache_op,
429 						PERF_COUNT_HW_CACHE_OP_MAX, &len);
430 			if (cache_op >= 0) {
431 				if (!evsel__is_cache_op_valid(cache_type, cache_op))
432 					return -EINVAL;
433 			}
434 		} else if (cache_result < 0) {
435 			cache_result = parse_aliases(str, evsel__hw_cache_result,
436 						PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
437 		}
438 	}
439 
440 	/*
441 	 * Fall back to reads:
442 	 */
443 	if (cache_op == -1)
444 		cache_op = PERF_COUNT_HW_CACHE_OP_READ;
445 
446 	/*
447 	 * Fall back to accesses:
448 	 */
449 	if (cache_result == -1)
450 		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
451 
452 	*config = cache_type | (cache_op << 8) | (cache_result << 16);
453 	if (perf_pmus__supports_extended_type())
454 		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
455 	return 0;
456 }
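
/*
 * Example (illustrative sketch, using the PERF_COUNT_HW_CACHE_* values
 * from the perf_event ABI): decoding "L1-dcache-load-misses" with no
 * extended PMU type yields L1D (0) as the cache type, OP_READ (0) for
 * "load" and RESULT_MISS (1) for "misses":
 *
 *	__u64 config;
 *
 *	if (!parse_events__decode_legacy_cache("L1-dcache-load-misses",
 *					       0, &config))
 *		assert(config == (0 | (0 << 8) | (1 << 16)));
 *
 * A bare "L1-dcache" falls back to OP_READ and RESULT_ACCESS, giving a
 * config of 0.
 */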
457 
458 /**
459  * parse_events__filter_pmu - returns false if a wildcard PMU should be
460  *                            considered, true if it should be filtered.
461  */
462 bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
463 			      const struct perf_pmu *pmu)
464 {
465 	if (parse_state->pmu_filter == NULL)
466 		return false;
467 
468 	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
469 }
470 
471 static int parse_events_add_pmu(struct parse_events_state *parse_state,
472 				struct list_head *list, struct perf_pmu *pmu,
473 				const struct parse_events_terms *const_parsed_terms,
474 				struct evsel *first_wildcard_match, u64 alternate_hw_config);
475 
476 int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
477 			   struct parse_events_state *parse_state,
478 			   struct parse_events_terms *parsed_terms)
479 {
480 	struct perf_pmu *pmu = NULL;
481 	bool found_supported = false;
482 	const char *config_name = get_config_name(parsed_terms);
483 	const char *metric_id = get_config_metric_id(parsed_terms);
484 	struct perf_cpu_map *cpus = get_config_cpu(parsed_terms, parse_state->fake_pmu);
485 	int ret = 0;
486 	struct evsel *first_wildcard_match = NULL;
487 
488 	while ((pmu = perf_pmus__scan_for_event(pmu, name)) != NULL) {
489 		LIST_HEAD(config_terms);
490 		struct perf_event_attr attr;
491 
492 		if (parse_events__filter_pmu(parse_state, pmu))
493 			continue;
494 
495 		if (perf_pmu__have_event(pmu, name)) {
496 			/*
497 			 * The PMU has the event, so add it as a PMU event
498 			 * rather than as a legacy cache event.
499 			 */
500 			ret = parse_events_add_pmu(parse_state, list, pmu,
501 						   parsed_terms,
502 						   first_wildcard_match,
503 						   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
504 			if (ret)
505 				goto out_err;
506 			if (first_wildcard_match == NULL)
507 				first_wildcard_match =
508 					container_of(list->prev, struct evsel, core.node);
509 			continue;
510 		}
511 
512 		if (!pmu->is_core) {
513 			/* Legacy cache events are only supported by core PMUs. */
514 			continue;
515 		}
516 
517 		memset(&attr, 0, sizeof(attr));
518 		attr.type = PERF_TYPE_HW_CACHE;
519 
520 		ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
521 		if (ret)
522 			goto out_err;
523 
524 		found_supported = true;
525 
526 		if (parsed_terms) {
527 			if (config_attr(&attr, parsed_terms, parse_state, config_term_common)) {
528 				ret = -EINVAL;
529 				goto out_err;
530 			}
531 			if (get_config_terms(parsed_terms, &config_terms)) {
532 				ret = -ENOMEM;
533 				goto out_err;
534 			}
535 		}
536 
537 		if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
538 				metric_id, pmu, &config_terms, first_wildcard_match,
539 				cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
540 			ret = -ENOMEM;
541 
542 		if (first_wildcard_match == NULL)
543 			first_wildcard_match = container_of(list->prev, struct evsel, core.node);
544 		free_config_terms(&config_terms);
545 		if (ret)
546 			goto out_err;
547 	}
548 out_err:
549 	perf_cpu_map__put(cpus);
550 	if (ret)
551 		return ret;
552 	return found_supported ? 0 : -EINVAL;
551 }
552 
553 static void tracepoint_error(struct parse_events_error *e, int err,
554 			     const char *sys, const char *name, int column)
555 {
556 	const char *str;
557 	char help[BUFSIZ];
558 
559 	if (!e)
560 		return;
561 
562 	/*
563 	 * We get the error directly from the syscall errno (> 0),
564 	 * or from an encoded error pointer (< 0).
565 	 */
566 	err = abs(err);
567 
568 	switch (err) {
569 	case EACCES:
570 		str = "can't access trace events";
571 		break;
572 	case ENOENT:
573 		str = "unknown tracepoint";
574 		break;
575 	default:
576 		str = "failed to add tracepoint";
577 		break;
578 	}
579 
580 	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
581 	parse_events_error__handle(e, column, strdup(str), strdup(help));
582 }
583 
584 static int add_tracepoint(struct parse_events_state *parse_state,
585 			  struct list_head *list,
586 			  const char *sys_name, const char *evt_name,
587 			  struct parse_events_error *err,
588 			  struct parse_events_terms *head_config, void *loc_)
589 {
590 	YYLTYPE *loc = loc_;
591 	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
592 					       !parse_state->fake_tp);
593 
594 	if (IS_ERR(evsel)) {
595 		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
596 		return PTR_ERR(evsel);
597 	}
598 
599 	if (head_config) {
600 		LIST_HEAD(config_terms);
601 
602 		if (get_config_terms(head_config, &config_terms))
603 			return -ENOMEM;
604 		list_splice(&config_terms, &evsel->config_terms);
605 	}
606 
607 	list_add_tail(&evsel->core.node, list);
608 	return 0;
609 }
610 
611 struct add_tracepoint_multi_args {
612 	struct parse_events_state *parse_state;
613 	struct list_head *list;
614 	const char *sys_glob;
615 	const char *evt_glob;
616 	struct parse_events_error *err;
617 	struct parse_events_terms *head_config;
618 	YYLTYPE *loc;
619 	int found;
620 };
621 
622 static int add_tracepoint_multi_event_cb(void *state, const char *sys_name, const char *evt_name)
623 {
624 	struct add_tracepoint_multi_args *args = state;
625 	int ret;
626 
627 	if (!strglobmatch(evt_name, args->evt_glob))
628 		return 0;
629 
630 	args->found++;
631 	ret = add_tracepoint(args->parse_state, args->list, sys_name, evt_name,
632 			     args->err, args->head_config, args->loc);
633 
634 	return ret;
635 }
636 
637 static int add_tracepoint_multi_event(struct add_tracepoint_multi_args *args, const char *sys_name)
638 {
639 	if (strpbrk(args->evt_glob, "*?") == NULL) {
640 		/* Not a glob. */
641 		args->found++;
642 		return add_tracepoint(args->parse_state, args->list, sys_name, args->evt_glob,
643 				      args->err, args->head_config, args->loc);
644 	}
645 
646 	return tp_pmu__for_each_tp_event(sys_name, args, add_tracepoint_multi_event_cb);
647 }
648 
649 static int add_tracepoint_multi_sys_cb(void *state, const char *sys_name)
650 {
651 	struct add_tracepoint_multi_args *args = state;
652 
653 	if (!strglobmatch(sys_name, args->sys_glob))
654 		return 0;
655 
656 	return add_tracepoint_multi_event(args, sys_name);
657 }
658 
659 static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
660 				    struct list_head *list,
661 				    const char *sys_glob, const char *evt_glob,
662 				    struct parse_events_error *err,
663 				    struct parse_events_terms *head_config, YYLTYPE *loc)
664 {
665 	struct add_tracepoint_multi_args args = {
666 		.parse_state = parse_state,
667 		.list = list,
668 		.sys_glob = sys_glob,
669 		.evt_glob = evt_glob,
670 		.err = err,
671 		.head_config = head_config,
672 		.loc = loc,
673 		.found = 0,
674 	};
675 	int ret;
676 
677 	if (strpbrk(sys_glob, "*?") == NULL) {
678 		/* Not a glob. */
679 		ret = add_tracepoint_multi_event(&args, sys_glob);
680 	} else {
681 		ret = tp_pmu__for_each_tp_sys(&args, add_tracepoint_multi_sys_cb);
682 	}
683 	if (args.found == 0) {
684 		tracepoint_error(err, ENOENT, sys_glob, evt_glob, loc->first_column);
685 		return -ENOENT;
686 	}
687 	return ret;
688 }
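
/*
 * Example (illustrative sketch): "sched:sched_switch" contains no glob
 * characters, so both the system and event take the direct path and a
 * single tracepoint evsel is added. "sched:sched_*" instead walks every
 * event under the sched system and adds each one whose name matches
 * the glob; if nothing matched, found stays 0 and ENOENT is reported.
 */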
689 
690 size_t default_breakpoint_len(void)
691 {
692 #if defined(__i386__)
693 	static int len;
694 
695 	if (len == 0) {
696 		struct perf_env env = {};
697 
698 		perf_env__init(&env);
699 		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
700 		perf_env__exit(&env);
701 	}
702 	return len;
703 #elif defined(__aarch64__)
704 	return 4;
705 #else
706 	return sizeof(long);
707 #endif
708 }
709 
710 static int
711 parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
712 {
713 	int i;
714 
715 	for (i = 0; i < 3; i++) {
716 		if (!type || !type[i])
717 			break;
718 
719 #define CHECK_SET_TYPE(bit)		\
720 do {					\
721 	if (attr->bp_type & bit)	\
722 		return -EINVAL;		\
723 	else				\
724 		attr->bp_type |= bit;	\
725 } while (0)
726 
727 		switch (type[i]) {
728 		case 'r':
729 			CHECK_SET_TYPE(HW_BREAKPOINT_R);
730 			break;
731 		case 'w':
732 			CHECK_SET_TYPE(HW_BREAKPOINT_W);
733 			break;
734 		case 'x':
735 			CHECK_SET_TYPE(HW_BREAKPOINT_X);
736 			break;
737 		default:
738 			return -EINVAL;
739 		}
740 	}
741 
742 #undef CHECK_SET_TYPE
743 
744 	if (!attr->bp_type) /* Default */
745 		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
746 
747 	return 0;
748 }
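
/*
 * Example (illustrative sketch): a type string of "rw" sets
 * HW_BREAKPOINT_R | HW_BREAKPOINT_W, "x" sets HW_BREAKPOINT_X, and a
 * missing type defaults to read-write. Repeating a letter, as in "rr",
 * fails with -EINVAL because the bit is already set.
 */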
749 
750 int parse_events_add_breakpoint(struct parse_events_state *parse_state,
751 				struct list_head *list,
752 				u64 addr, char *type, u64 len,
753 				struct parse_events_terms *head_config)
754 {
755 	struct perf_event_attr attr;
756 	LIST_HEAD(config_terms);
757 	const char *name;
758 
759 	memset(&attr, 0, sizeof(attr));
760 	attr.bp_addr = addr;
761 
762 	if (parse_breakpoint_type(type, &attr))
763 		return -EINVAL;
764 
765 	/* Provide some defaults if len is not specified */
766 	if (!len) {
767 		if (attr.bp_type == HW_BREAKPOINT_X)
768 			len = default_breakpoint_len();
769 		else
770 			len = HW_BREAKPOINT_LEN_4;
771 	}
772 
773 	attr.bp_len = len;
774 
775 	attr.type = PERF_TYPE_BREAKPOINT;
776 	attr.sample_period = 1;
777 
778 	if (head_config) {
779 		if (config_attr(&attr, head_config, parse_state, config_term_common))
780 			return -EINVAL;
781 
782 		if (get_config_terms(head_config, &config_terms))
783 			return -ENOMEM;
784 	}
785 
786 	name = get_config_name(head_config);
787 
788 	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
789 			&config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
790 }
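
/*
 * Example (illustrative sketch, assuming the usual mem:addr[:access]
 * syntax): the event string "mem:0x1000:rw" reaches this function with
 * addr == 0x1000, type == "rw" and len == 0, producing an attr with
 * type PERF_TYPE_BREAKPOINT, bp_type HW_BREAKPOINT_R | HW_BREAKPOINT_W,
 * bp_len HW_BREAKPOINT_LEN_4 and sample_period 1. An execution
 * breakpoint ("mem:0x1000:x") gets default_breakpoint_len() instead.
 */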
791 
792 static int check_type_val(struct parse_events_term *term,
793 			  struct parse_events_error *err,
794 			  enum parse_events__term_val_type type)
795 {
796 	if (type == term->type_val)
797 		return 0;
798 
799 	if (err) {
800 		parse_events_error__handle(err, term->err_val,
801 					type == PARSE_EVENTS__TERM_TYPE_NUM
802 					? strdup("expected numeric value")
803 					: strdup("expected string value"),
804 					NULL);
805 	}
806 	return -EINVAL;
807 }
808 
809 static bool config_term_shrinked;
810 
811 const char *parse_events__term_type_str(enum parse_events__term_type term_type)
812 {
813 	/*
814 	 * Update according to parse-events.l
815 	 */
816 	static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
817 		[PARSE_EVENTS__TERM_TYPE_USER]			= "<sysfs term>",
818 		[PARSE_EVENTS__TERM_TYPE_CONFIG]		= "config",
819 		[PARSE_EVENTS__TERM_TYPE_CONFIG1]		= "config1",
820 		[PARSE_EVENTS__TERM_TYPE_CONFIG2]		= "config2",
821 		[PARSE_EVENTS__TERM_TYPE_CONFIG3]		= "config3",
822 		[PARSE_EVENTS__TERM_TYPE_NAME]			= "name",
823 		[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]		= "period",
824 		[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]		= "freq",
825 		[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]	= "branch_type",
826 		[PARSE_EVENTS__TERM_TYPE_TIME]			= "time",
827 		[PARSE_EVENTS__TERM_TYPE_CALLGRAPH]		= "call-graph",
828 		[PARSE_EVENTS__TERM_TYPE_STACKSIZE]		= "stack-size",
829 		[PARSE_EVENTS__TERM_TYPE_NOINHERIT]		= "no-inherit",
830 		[PARSE_EVENTS__TERM_TYPE_INHERIT]		= "inherit",
831 		[PARSE_EVENTS__TERM_TYPE_MAX_STACK]		= "max-stack",
832 		[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]		= "nr",
833 		[PARSE_EVENTS__TERM_TYPE_OVERWRITE]		= "overwrite",
834 		[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]		= "no-overwrite",
835 		[PARSE_EVENTS__TERM_TYPE_DRV_CFG]		= "driver-config",
836 		[PARSE_EVENTS__TERM_TYPE_PERCORE]		= "percore",
837 		[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]		= "aux-output",
838 		[PARSE_EVENTS__TERM_TYPE_AUX_ACTION]		= "aux-action",
839 		[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]	= "aux-sample-size",
840 		[PARSE_EVENTS__TERM_TYPE_METRIC_ID]		= "metric-id",
841 		[PARSE_EVENTS__TERM_TYPE_RAW]                   = "raw",
842 		[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE]          = "legacy-cache",
843 		[PARSE_EVENTS__TERM_TYPE_HARDWARE]              = "hardware",
844 		[PARSE_EVENTS__TERM_TYPE_CPU]			= "cpu",
845 	};
846 	if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
847 		return "unknown term";
848 
849 	return config_term_names[term_type];
850 }
851 
852 static bool
853 config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
854 {
855 	char *err_str;
856 
857 	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
858 		parse_events_error__handle(err, -1,
859 					strdup("Invalid term_type"), NULL);
860 		return false;
861 	}
862 	if (!config_term_shrinked)
863 		return true;
864 
865 	switch (term_type) {
866 	case PARSE_EVENTS__TERM_TYPE_CONFIG:
867 	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
868 	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
869 	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
870 	case PARSE_EVENTS__TERM_TYPE_NAME:
871 	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
872 	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
873 	case PARSE_EVENTS__TERM_TYPE_PERCORE:
874 	case PARSE_EVENTS__TERM_TYPE_CPU:
875 		return true;
876 	case PARSE_EVENTS__TERM_TYPE_USER:
877 	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
878 	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
879 	case PARSE_EVENTS__TERM_TYPE_TIME:
880 	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
881 	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
882 	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
883 	case PARSE_EVENTS__TERM_TYPE_INHERIT:
884 	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
885 	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
886 	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
887 	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
888 	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
889 	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
890 	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
891 	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
892 	case PARSE_EVENTS__TERM_TYPE_RAW:
893 	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
894 	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
895 	default:
896 		if (!err)
897 			return false;
898 
899 		/* term_type is validated so indexing is safe */
900 		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
901 			     parse_events__term_type_str(term_type)) >= 0)
902 			parse_events_error__handle(err, -1, err_str, NULL);
903 		return false;
904 	}
905 }
906 
907 void parse_events__shrink_config_terms(void)
908 {
909 	config_term_shrinked = true;
910 }
911 
912 static int config_term_common(struct perf_event_attr *attr,
913 			      struct parse_events_term *term,
914 			      struct parse_events_state *parse_state)
915 {
916 #define CHECK_TYPE_VAL(type)								\
917 do {											\
918 	if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_ ## type))	\
919 		return -EINVAL;								\
920 } while (0)
921 
922 	switch (term->type_term) {
923 	case PARSE_EVENTS__TERM_TYPE_CONFIG:
924 		CHECK_TYPE_VAL(NUM);
925 		attr->config = term->val.num;
926 		break;
927 	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
928 		CHECK_TYPE_VAL(NUM);
929 		attr->config1 = term->val.num;
930 		break;
931 	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
932 		CHECK_TYPE_VAL(NUM);
933 		attr->config2 = term->val.num;
934 		break;
935 	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
936 		CHECK_TYPE_VAL(NUM);
937 		attr->config3 = term->val.num;
938 		break;
939 	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
940 		CHECK_TYPE_VAL(NUM);
941 		break;
942 	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
943 		CHECK_TYPE_VAL(NUM);
944 		break;
945 	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
946 		CHECK_TYPE_VAL(STR);
947 		if (strcmp(term->val.str, "no") &&
948 		    parse_branch_str(term->val.str,
949 				    &attr->branch_sample_type)) {
950 			parse_events_error__handle(parse_state->error, term->err_val,
951 					strdup("invalid branch sample type"),
952 					NULL);
953 			return -EINVAL;
954 		}
955 		break;
956 	case PARSE_EVENTS__TERM_TYPE_TIME:
957 		CHECK_TYPE_VAL(NUM);
958 		if (term->val.num > 1) {
959 			parse_events_error__handle(parse_state->error, term->err_val,
960 						strdup("expected 0 or 1"),
961 						NULL);
962 			return -EINVAL;
963 		}
964 		break;
965 	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
966 		CHECK_TYPE_VAL(STR);
967 		break;
968 	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
969 		CHECK_TYPE_VAL(NUM);
970 		break;
971 	case PARSE_EVENTS__TERM_TYPE_INHERIT:
972 		CHECK_TYPE_VAL(NUM);
973 		break;
974 	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
975 		CHECK_TYPE_VAL(NUM);
976 		break;
977 	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
978 		CHECK_TYPE_VAL(NUM);
979 		break;
980 	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
981 		CHECK_TYPE_VAL(NUM);
982 		break;
983 	case PARSE_EVENTS__TERM_TYPE_NAME:
984 		CHECK_TYPE_VAL(STR);
985 		break;
986 	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
987 		CHECK_TYPE_VAL(STR);
988 		break;
989 	case PARSE_EVENTS__TERM_TYPE_RAW:
990 		CHECK_TYPE_VAL(STR);
991 		break;
992 	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
993 		CHECK_TYPE_VAL(NUM);
994 		break;
995 	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
996 		CHECK_TYPE_VAL(NUM);
997 		break;
998 	case PARSE_EVENTS__TERM_TYPE_PERCORE:
999 		CHECK_TYPE_VAL(NUM);
1000 		if ((unsigned int)term->val.num > 1) {
1001 			parse_events_error__handle(parse_state->error, term->err_val,
1002 						strdup("expected 0 or 1"),
1003 						NULL);
1004 			return -EINVAL;
1005 		}
1006 		break;
1007 	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1008 		CHECK_TYPE_VAL(NUM);
1009 		break;
1010 	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
1011 		CHECK_TYPE_VAL(STR);
1012 		break;
1013 	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1014 		CHECK_TYPE_VAL(NUM);
1015 		if (term->val.num > UINT_MAX) {
1016 			parse_events_error__handle(parse_state->error, term->err_val,
1017 						strdup("too big"),
1018 						NULL);
1019 			return -EINVAL;
1020 		}
1021 		break;
1022 	case PARSE_EVENTS__TERM_TYPE_CPU: {
1023 		struct perf_cpu_map *map;
1024 
1025 		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
1026 			if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
1027 				parse_events_error__handle(parse_state->error, term->err_val,
1028 							strdup("too big"),
1029 							/*help=*/NULL);
1030 				return -EINVAL;
1031 			}
1032 			break;
1033 		}
1034 		assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
1035 		if (perf_pmus__find(term->val.str) != NULL)
1036 			break;
1037 
1038 		map = perf_cpu_map__new(term->val.str);
1039 		if (!map && !parse_state->fake_pmu) {
1040 			parse_events_error__handle(parse_state->error, term->err_val,
1041 						   strdup("not a valid PMU or CPU number"),
1042 						   /*help=*/NULL);
1043 			return -EINVAL;
1044 		}
1045 		perf_cpu_map__put(map);
1046 		break;
1047 	}
1048 	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1049 	case PARSE_EVENTS__TERM_TYPE_USER:
1050 	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1051 	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1052 	default:
1053 		parse_events_error__handle(parse_state->error, term->err_term,
1054 					strdup(parse_events__term_type_str(term->type_term)),
1055 					parse_events_formats_error_string(NULL));
1056 		return -EINVAL;
1057 	}
1058 
1059 	/*
1060 	 * Check term availability after basic checking so
1061 	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
1062 	 *
1063 	 * If check availability at the entry of this function,
1064 	 * If we checked availability at the entry of this function, the
1065 	 * user would see "'<sysfs term>' is not usable in 'perf stat'"
1066 	 * (for example, instructions/badterm/...), which is confusing.
1067 	 */
1068 	if (!config_term_avail(term->type_term, parse_state->error))
1069 		return -EINVAL;
1070 	return 0;
1071 #undef CHECK_TYPE_VAL
1072 }
1073 
1074 static int config_term_pmu(struct perf_event_attr *attr,
1075 			   struct parse_events_term *term,
1076 			   struct parse_events_state *parse_state)
1077 {
1078 	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
1079 		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
1080 
1081 		if (!pmu) {
1082 			char *err_str;
1083 
1084 			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
1085 				parse_events_error__handle(parse_state->error, term->err_term,
1086 							   err_str, /*help=*/NULL);
1087 			return -EINVAL;
1088 		}
1089 		/*
1090 		 * Rewrite the PMU event to a legacy cache one unless the PMU
1091 		 * doesn't support legacy cache events or the event is present
1092 		 * within the PMU.
1093 		 */
1094 		if (perf_pmu__supports_legacy_cache(pmu) &&
1095 		    !perf_pmu__have_event(pmu, term->config)) {
1096 			attr->type = PERF_TYPE_HW_CACHE;
1097 			return parse_events__decode_legacy_cache(term->config, pmu->type,
1098 								 &attr->config);
1099 		} else {
1100 			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
1101 			term->no_value = true;
1102 		}
1103 	}
1104 	if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
1105 		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
1106 
1107 		if (!pmu) {
1108 			char *err_str;
1109 
1110 			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
1111 				parse_events_error__handle(parse_state->error, term->err_term,
1112 							   err_str, /*help=*/NULL);
1113 			return -EINVAL;
1114 		}
1115 		/*
1116 		 * If the PMU has a sysfs or json event, prefer it over
1117 		 * the legacy encoding. ARM requires this.
1118 		 */
1119 		if (perf_pmu__have_event(pmu, term->config)) {
1120 			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
1121 			term->no_value = true;
1122 			term->alternate_hw_config = true;
1123 		} else {
1124 			attr->type = PERF_TYPE_HARDWARE;
1125 			attr->config = term->val.num;
1126 			if (perf_pmus__supports_extended_type())
1127 				attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
1128 		}
1129 		return 0;
1130 	}
1131 	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
1132 	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
1133 		/*
1134 		 * Always succeed for sysfs terms, as we don't know
1135 		 * at this point what type they need to have.
1136 		 */
1137 		return 0;
1138 	}
1139 	return config_term_common(attr, term, parse_state);
1140 }
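
/*
 * Example (illustrative sketch): for "cpu/L1-dcache-load-misses/" the
 * parser produces a legacy-cache term. If the cpu PMU supports legacy
 * cache events and has no json/sysfs event of that exact name, the
 * attr is rewritten to type PERF_TYPE_HW_CACHE with the decoded
 * config; otherwise the term is downgraded to a user term so the PMU's
 * own event definition wins.
 */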
1141 
1142 static int config_term_tracepoint(struct perf_event_attr *attr,
1143 				  struct parse_events_term *term,
1144 				  struct parse_events_state *parse_state)
1145 {
1146 	switch (term->type_term) {
1147 	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1148 	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1149 	case PARSE_EVENTS__TERM_TYPE_INHERIT:
1150 	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1151 	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1152 	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1153 	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1154 	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1155 	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1156 	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
1157 	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1158 		return config_term_common(attr, term, parse_state);
1159 	case PARSE_EVENTS__TERM_TYPE_USER:
1160 	case PARSE_EVENTS__TERM_TYPE_CONFIG:
1161 	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1162 	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1163 	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
1164 	case PARSE_EVENTS__TERM_TYPE_NAME:
1165 	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1166 	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1167 	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1168 	case PARSE_EVENTS__TERM_TYPE_TIME:
1169 	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1170 	case PARSE_EVENTS__TERM_TYPE_PERCORE:
1171 	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1172 	case PARSE_EVENTS__TERM_TYPE_RAW:
1173 	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1174 	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1175 	case PARSE_EVENTS__TERM_TYPE_CPU:
1176 	default:
1177 		parse_events_error__handle(parse_state->error, term->err_term,
1178 					strdup(parse_events__term_type_str(term->type_term)),
1179 					strdup("valid terms: call-graph,stack-size\n")
1180 				);
1181 		return -EINVAL;
1182 	}
1183 
1184 	return 0;
1185 }
1186 
1187 static int config_attr(struct perf_event_attr *attr,
1188 		       const struct parse_events_terms *head,
1189 		       struct parse_events_state *parse_state,
1190 		       config_term_func_t config_term)
1191 {
1192 	struct parse_events_term *term;
1193 
1194 	list_for_each_entry(term, &head->terms, list)
1195 		if (config_term(attr, term, parse_state))
1196 			return -EINVAL;
1197 
1198 	return 0;
1199 }
1200 
1201 static int get_config_terms(const struct parse_events_terms *head_config,
1202 			    struct list_head *head_terms)
1203 {
1204 #define ADD_CONFIG_TERM(__type, __weak)				\
1205 	struct evsel_config_term *__t;			\
1206 								\
1207 	__t = zalloc(sizeof(*__t));				\
1208 	if (!__t)						\
1209 		return -ENOMEM;					\
1210 								\
1211 	INIT_LIST_HEAD(&__t->list);				\
1212 	__t->type       = EVSEL__CONFIG_TERM_ ## __type;	\
1213 	__t->weak	= __weak;				\
1214 	list_add_tail(&__t->list, head_terms)
1215 
1216 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
1217 do {								\
1218 	ADD_CONFIG_TERM(__type, __weak);			\
1219 	__t->val.__name = __val;				\
1220 } while (0)
1221 
1222 #define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
1223 do {								\
1224 	ADD_CONFIG_TERM(__type, __weak);			\
1225 	__t->val.str = strdup(__val);				\
1226 	if (!__t->val.str) {					\
1227 		zfree(&__t);					\
1228 		return -ENOMEM;					\
1229 	}							\
1230 	__t->free_str = true;					\
1231 } while (0)
1232 
1233 	struct parse_events_term *term;
1234 
1235 	list_for_each_entry(term, &head_config->terms, list) {
1236 		switch (term->type_term) {
1237 		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1238 			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
1239 			break;
1240 		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1241 			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
1242 			break;
1243 		case PARSE_EVENTS__TERM_TYPE_TIME:
1244 			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
1245 			break;
1246 		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1247 			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
1248 			break;
1249 		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1250 			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
1251 			break;
1252 		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1253 			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
1254 					    term->val.num, term->weak);
1255 			break;
1256 		case PARSE_EVENTS__TERM_TYPE_INHERIT:
1257 			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1258 					    term->val.num ? 1 : 0, term->weak);
1259 			break;
1260 		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1261 			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1262 					    term->val.num ? 0 : 1, term->weak);
1263 			break;
1264 		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1265 			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
1266 					    term->val.num, term->weak);
1267 			break;
1268 		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1269 			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
1270 					    term->val.num, term->weak);
1271 			break;
1272 		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1273 			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1274 					    term->val.num ? 1 : 0, term->weak);
1275 			break;
1276 		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1277 			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1278 					    term->val.num ? 0 : 1, term->weak);
1279 			break;
1280 		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1281 			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
1282 			break;
1283 		case PARSE_EVENTS__TERM_TYPE_PERCORE:
1284 			ADD_CONFIG_TERM_VAL(PERCORE, percore,
1285 					    term->val.num ? true : false, term->weak);
1286 			break;
1287 		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1288 			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
1289 					    term->val.num ? 1 : 0, term->weak);
1290 			break;
1291 		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
1292 			ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
1293 			break;
1294 		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1295 			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
1296 					    term->val.num, term->weak);
1297 			break;
1298 		case PARSE_EVENTS__TERM_TYPE_USER:
1299 		case PARSE_EVENTS__TERM_TYPE_CONFIG:
1300 		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1301 		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1302 		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
1303 		case PARSE_EVENTS__TERM_TYPE_NAME:
1304 		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1305 		case PARSE_EVENTS__TERM_TYPE_RAW:
1306 		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1307 		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1308 		case PARSE_EVENTS__TERM_TYPE_CPU:
1309 		default:
1310 			break;
1311 		}
1312 	}
1313 	return 0;
1314 }
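
/*
 * Example (illustrative sketch): parsing "cycles/period=1000,time=1/"
 * produces parser terms that this function turns into two
 * evsel_config_term entries, EVSEL__CONFIG_TERM_PERIOD with val.period
 * == 1000 and EVSEL__CONFIG_TERM_TIME with val.time == 1, queued on
 * the evsel's config_terms list to be applied when the event is
 * configured.
 */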
1315 
1316 /*
1317  * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
1318  * each bit of attr->config that the user has changed.
1319  */
1320 static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
1321 			   struct list_head *head_terms)
1322 {
1323 	struct parse_events_term *term;
1324 	u64 bits = 0;
1325 	int type;
1326 
1327 	list_for_each_entry(term, &head_config->terms, list) {
1328 		switch (term->type_term) {
1329 		case PARSE_EVENTS__TERM_TYPE_USER:
1330 			type = perf_pmu__format_type(pmu, term->config);
1331 			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
1332 				continue;
1333 			bits |= perf_pmu__format_bits(pmu, term->config);
1334 			break;
1335 		case PARSE_EVENTS__TERM_TYPE_CONFIG:
1336 			bits = ~(u64)0;
1337 			break;
1338 		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1339 		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1340 		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
1341 		case PARSE_EVENTS__TERM_TYPE_NAME:
1342 		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1343 		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1344 		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1345 		case PARSE_EVENTS__TERM_TYPE_TIME:
1346 		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1347 		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1348 		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1349 		case PARSE_EVENTS__TERM_TYPE_INHERIT:
1350 		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1351 		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1352 		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1353 		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1354 		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1355 		case PARSE_EVENTS__TERM_TYPE_PERCORE:
1356 		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1357 		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
1358 		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1359 		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1360 		case PARSE_EVENTS__TERM_TYPE_RAW:
1361 		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1362 		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1363 		case PARSE_EVENTS__TERM_TYPE_CPU:
1364 		default:
1365 			break;
1366 		}
1367 	}
1368 
1369 	if (bits)
1370 		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);
1371 
1372 #undef ADD_CONFIG_TERM
1373 	return 0;
1374 }
1375 
1376 int parse_events_add_tracepoint(struct parse_events_state *parse_state,
1377 				struct list_head *list,
1378 				const char *sys, const char *event,
1379 				struct parse_events_error *err,
1380 				struct parse_events_terms *head_config, void *loc_)
1381 {
1382 	YYLTYPE *loc = loc_;
1383 
1384 	if (head_config) {
1385 		struct perf_event_attr attr;
1386 
1387 		if (config_attr(&attr, head_config, parse_state, config_term_tracepoint))
1388 			return -EINVAL;
1389 	}
1390 
1391 	return add_tracepoint_multi_sys(parse_state, list, sys, event,
1392 					err, head_config, loc);
1393 }
1394 
1395 static int __parse_events_add_numeric(struct parse_events_state *parse_state,
1396 				struct list_head *list,
1397 				struct perf_pmu *pmu, u32 type, u32 extended_type,
1398 				u64 config, const struct parse_events_terms *head_config,
1399 				struct evsel *first_wildcard_match)
1400 {
1401 	struct perf_event_attr attr;
1402 	LIST_HEAD(config_terms);
1403 	const char *name, *metric_id;
1404 	struct perf_cpu_map *cpus;
1405 	int ret;
1406 
1407 	memset(&attr, 0, sizeof(attr));
1408 	attr.type = type;
1409 	attr.config = config;
1410 	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
1411 		assert(perf_pmus__supports_extended_type());
1412 		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
1413 	}
1414 
1415 	if (head_config) {
1416 		if (config_attr(&attr, head_config, parse_state, config_term_common))
1417 			return -EINVAL;
1418 
1419 		if (get_config_terms(head_config, &config_terms))
1420 			return -ENOMEM;
1421 	}
1422 
1423 	name = get_config_name(head_config);
1424 	metric_id = get_config_metric_id(head_config);
1425 	cpus = get_config_cpu(head_config, parse_state->fake_pmu);
1426 	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
1427 			metric_id, pmu, &config_terms, first_wildcard_match,
1428 			cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
1429 	perf_cpu_map__put(cpus);
1430 	free_config_terms(&config_terms);
1431 	return ret;
1432 }
1433 
1434 int parse_events_add_numeric(struct parse_events_state *parse_state,
1435 			     struct list_head *list,
1436 			     u32 type, u64 config,
1437 			     const struct parse_events_terms *head_config,
1438 			     bool wildcard)
1439 {
1440 	struct perf_pmu *pmu = NULL;
1441 	bool found_supported = false;
1442 
1443 	/* Wildcards on numeric values are only supported by core PMUs. */
1444 	if (wildcard && perf_pmus__supports_extended_type()) {
1445 		struct evsel *first_wildcard_match = NULL;
1446 		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
1447 			int ret;
1448 
1449 			found_supported = true;
1450 			if (parse_events__filter_pmu(parse_state, pmu))
1451 				continue;
1452 
1453 			ret = __parse_events_add_numeric(parse_state, list, pmu,
1454 							 type, pmu->type,
1455 							 config, head_config,
1456 							 first_wildcard_match);
1457 			if (ret)
1458 				return ret;
1459 			if (first_wildcard_match == NULL)
1460 				first_wildcard_match =
1461 					container_of(list->prev, struct evsel, core.node);
1462 		}
1463 		if (found_supported)
1464 			return 0;
1465 	}
1466 	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
1467 					type, /*extended_type=*/0, config, head_config,
1468 					/*first_wildcard_match=*/NULL);
1469 }
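
/*
 * Example (illustrative sketch): on a hybrid system with two core
 * PMUs, say cpu_core and cpu_atom, parsing the wildcard legacy event
 * "instructions" adds one evsel per core PMU. Each gets attr.type
 * PERF_TYPE_HARDWARE and attr.config PERF_COUNT_HW_INSTRUCTIONS with
 * the PMU's type number encoded in the high bits:
 *
 *	attr.config = PERF_COUNT_HW_INSTRUCTIONS |
 *		      ((u64)pmu->type << PERF_PMU_TYPE_SHIFT);
 *
 * On kernels without extended type support, a single event on the PMU
 * found by perf_pmus__find_by_type() is added instead.
 */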
1470 
1471 static bool config_term_percore(struct list_head *config_terms)
1472 {
1473 	struct evsel_config_term *term;
1474 
1475 	list_for_each_entry(term, config_terms, list) {
1476 		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
1477 			return term->val.percore;
1478 	}
1479 
1480 	return false;
1481 }
1482 
1483 static int parse_events_add_pmu(struct parse_events_state *parse_state,
1484 				struct list_head *list, struct perf_pmu *pmu,
1485 				const struct parse_events_terms *const_parsed_terms,
1486 				struct evsel *first_wildcard_match, u64 alternate_hw_config)
1487 {
1488 	struct perf_event_attr attr;
1489 	struct perf_pmu_info info;
1490 	struct evsel *evsel;
1491 	struct parse_events_error *err = parse_state->error;
1492 	LIST_HEAD(config_terms);
1493 	struct parse_events_terms parsed_terms;
1494 	bool alias_rewrote_terms = false;
1495 	struct perf_cpu_map *term_cpu = NULL;
1496 
1497 	if (verbose > 1) {
1498 		struct strbuf sb;
1499 
1500 		strbuf_init(&sb, /*hint=*/ 0);
1501 		if (pmu->selectable && const_parsed_terms &&
1502 		    list_empty(&const_parsed_terms->terms)) {
1503 			strbuf_addf(&sb, "%s//", pmu->name);
1504 		} else {
1505 			strbuf_addf(&sb, "%s/", pmu->name);
1506 			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
1507 			strbuf_addch(&sb, '/');
1508 		}
1509 		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
1510 		strbuf_release(&sb);
1511 	}
1512 
1513 	memset(&attr, 0, sizeof(attr));
1514 	if (pmu->perf_event_attr_init_default)
1515 		pmu->perf_event_attr_init_default(pmu, &attr);
1516 
1517 	attr.type = pmu->type;
1518 
1519 	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
1520 		evsel = __add_event(list, &parse_state->idx, &attr,
1521 				    /*init_attr=*/true, /*name=*/NULL,
1522 				    /*metric_id=*/NULL, pmu,
1523 				    /*config_terms=*/NULL, first_wildcard_match,
1524 				    /*cpu_list=*/NULL, alternate_hw_config);
1525 		return evsel ? 0 : -ENOMEM;
1526 	}
1527 
1528 	parse_events_terms__init(&parsed_terms);
1529 	if (const_parsed_terms) {
1530 		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
1531 
1532 		if (ret)
1533 			return ret;
1534 	}
1535 	fix_raw(&parsed_terms, pmu);
1536 
1537 	/* Configure attr/terms with a known PMU; this will set hardcoded terms. */
1538 	if (config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
1539 		parse_events_terms__exit(&parsed_terms);
1540 		return -EINVAL;
1541 	}
1542 
1543 	/* Look for event names in the terms and rewrite into format-based terms. */
1544 	if (perf_pmu__check_alias(pmu, &parsed_terms,
1545 				  &info, &alias_rewrote_terms,
1546 				  &alternate_hw_config, err)) {
1547 		parse_events_terms__exit(&parsed_terms);
1548 		return -EINVAL;
1549 	}
1550 
1551 	if (verbose > 1) {
1552 		struct strbuf sb;
1553 
1554 		strbuf_init(&sb, /*hint=*/ 0);
1555 		parse_events_terms__to_strbuf(&parsed_terms, &sb);
1556 		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
1557 		strbuf_release(&sb);
1558 	}
1559 
1560 	/* Configure attr/terms again if an alias was expanded. */
1561 	if (alias_rewrote_terms &&
1562 	    config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
1563 		parse_events_terms__exit(&parsed_terms);
1564 		return -EINVAL;
1565 	}
1566 
1567 	if (get_config_terms(&parsed_terms, &config_terms)) {
1568 		parse_events_terms__exit(&parsed_terms);
1569 		return -ENOMEM;
1570 	}
1571 
1572 	/*
1573 	 * When using default config, record which bits of attr->config were
1574 	 * changed by the user.
1575 	 */
1576 	if (pmu->perf_event_attr_init_default &&
1577 	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
1578 		parse_events_terms__exit(&parsed_terms);
1579 		return -ENOMEM;
1580 	}
1581 
1582 	/* Skip configuring hardcoded terms that were applied by config_attr. */
1583 	if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
1584 			     parse_state->error)) {
1585 		free_config_terms(&config_terms);
1586 		parse_events_terms__exit(&parsed_terms);
1587 		return -EINVAL;
1588 	}
1589 
1590 	term_cpu = get_config_cpu(&parsed_terms, parse_state->fake_pmu);
1591 	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
1592 			    get_config_name(&parsed_terms),
1593 			    get_config_metric_id(&parsed_terms), pmu,
1594 			    &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
1595 	perf_cpu_map__put(term_cpu);
1596 	if (!evsel) {
1597 		parse_events_terms__exit(&parsed_terms);
1598 		return -ENOMEM;
1599 	}
1600 
1601 	if (evsel->name)
1602 		evsel->use_config_name = true;
1603 
1604 	evsel->percore = config_term_percore(&evsel->config_terms);
1605 
1606 	parse_events_terms__exit(&parsed_terms);
1607 	free((char *)evsel->unit);
1608 	evsel->unit = strdup(info.unit);
1609 	evsel->scale = info.scale;
1610 	evsel->per_pkg = info.per_pkg;
1611 	evsel->snapshot = info.snapshot;
1612 	evsel->retirement_latency.mean = info.retirement_latency_mean;
1613 	evsel->retirement_latency.min = info.retirement_latency_min;
1614 	evsel->retirement_latency.max = info.retirement_latency_max;
1615 
1616 	return 0;
1617 }
1618 
1619 int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
1620 			       const char *event_name, u64 hw_config,
1621 			       const struct parse_events_terms *const_parsed_terms,
1622 			       struct list_head **listp, void *loc_)
1623 {
1624 	struct parse_events_term *term;
1625 	struct list_head *list = NULL;
1626 	struct perf_pmu *pmu = NULL;
1627 	YYLTYPE *loc = loc_;
1628 	int ok = 0;
1629 	const char *config;
1630 	struct parse_events_terms parsed_terms;
1631 	struct evsel *first_wildcard_match = NULL;
1632 
1633 	*listp = NULL;
1634 
1635 	parse_events_terms__init(&parsed_terms);
1636 	if (const_parsed_terms) {
1637 		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
1638 
1639 		if (ret)
1640 			return ret;
1641 	}
1642 
1643 	config = strdup(event_name);
1644 	if (!config)
1645 		goto out_err;
1646 
1647 	if (parse_events_term__num(&term,
1648 				   PARSE_EVENTS__TERM_TYPE_USER,
1649 				   config, /*num=*/1, /*novalue=*/true,
1650 				   loc, /*loc_val=*/NULL) < 0) {
1651 		zfree(&config);
1652 		goto out_err;
1653 	}
1654 	list_add_tail(&term->list, &parsed_terms.terms);
1655 
1656 	/* Add it for all PMUs that support the alias */
1657 	list = malloc(sizeof(struct list_head));
1658 	if (!list)
1659 		goto out_err;
1660 
1661 	INIT_LIST_HEAD(list);
1662 
1663 	while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) {
1664 
1665 		if (parse_events__filter_pmu(parse_state, pmu))
1666 			continue;
1667 
1668 		if (!perf_pmu__have_event(pmu, event_name))
1669 			continue;
1670 
1671 		if (!parse_events_add_pmu(parse_state, list, pmu,
1672 					  &parsed_terms, first_wildcard_match, hw_config)) {
1673 			struct strbuf sb;
1674 
1675 			strbuf_init(&sb, /*hint=*/ 0);
1676 			parse_events_terms__to_strbuf(&parsed_terms, &sb);
1677 			pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
1678 			strbuf_release(&sb);
1679 			ok++;
1680 		}
1681 		if (first_wildcard_match == NULL)
1682 			first_wildcard_match = container_of(list->prev, struct evsel, core.node);
1683 	}
1684 
1685 	if (parse_state->fake_pmu) {
1686 		if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
1687 					 first_wildcard_match, hw_config)) {
1688 			struct strbuf sb;
1689 
1690 			strbuf_init(&sb, /*hint=*/ 0);
1691 			parse_events_terms__to_strbuf(&parsed_terms, &sb);
1692 			pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
1693 			strbuf_release(&sb);
1694 			ok++;
1695 		}
1696 	}
1697 
1698 out_err:
1699 	parse_events_terms__exit(&parsed_terms);
1700 	if (ok)
1701 		*listp = list;
1702 	else
1703 		free(list);
1704 
1705 	return ok ? 0 : -1;
1706 }
1707 
1708 int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
1709 					const char *event_or_pmu,
1710 					const struct parse_events_terms *const_parsed_terms,
1711 					struct list_head **listp,
1712 					void *loc_)
1713 {
1714 	YYLTYPE *loc = loc_;
1715 	struct perf_pmu *pmu;
1716 	int ok = 0;
1717 	char *help;
1718 	struct evsel *first_wildcard_match = NULL;
1719 
1720 	*listp = malloc(sizeof(**listp));
1721 	if (!*listp)
1722 		return -ENOMEM;
1723 
1724 	INIT_LIST_HEAD(*listp);
1725 
1726 	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
1727 	pmu = perf_pmus__find(event_or_pmu);
1728 	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
1729 					 first_wildcard_match,
1730 					 /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
1731 		return 0;
1732 
1733 	if (parse_state->fake_pmu) {
1734 		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
1735 					  const_parsed_terms,
1736 					  first_wildcard_match,
1737 					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
1738 			return 0;
1739 	}
1740 
1741 	pmu = NULL;
1742 	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
1743 	while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {
1744 
1745 		if (parse_events__filter_pmu(parse_state, pmu))
1746 			continue;
1747 
1748 		if (!parse_events_add_pmu(parse_state, *listp, pmu,
1749 					  const_parsed_terms,
1750 					  first_wildcard_match,
1751 					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
1752 			ok++;
1753 			parse_state->wild_card_pmus = true;
1754 		}
1755 		if (first_wildcard_match == NULL) {
1756 			first_wildcard_match =
1757 				container_of((*listp)->prev, struct evsel, core.node);
1758 		}
1759 	}
1760 	if (ok)
1761 		return 0;
1762 
1763 	/* Failure to add, assume event_or_pmu is an event name. */
1764 	zfree(listp);
1765 	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
1766 					const_parsed_terms, listp, loc))
1767 		return 0;
1768 
1769 	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
1770 		help = NULL;
1771 	parse_events_error__handle(parse_state->error, loc->first_column,
1772 				strdup("Bad event or PMU"),
1773 				help);
1774 	zfree(listp);
1775 	return -EINVAL;
1776 }
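
/*
 * Resolution order sketch for the function above, with hypothetical names:
 * given "cpu_core" (any terms from between the slashes arrive separately in
 * const_parsed_terms), perf_pmus__find() resolves the PMU directly; given
 * "uncore_imc_*", the wildcard scan adds one evsel per matching PMU and
 * records wild_card_pmus; given "cycles", both PMU lookups fail and the
 * string is retried as an event name via parse_events_multi_pmu_add().
 */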
1777 
1778 void parse_events__set_leader(char *name, struct list_head *list)
1779 {
1780 	struct evsel *leader;
1781 
1782 	if (list_empty(list)) {
1783 		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1784 		return;
1785 	}
1786 
1787 	leader = list_first_entry(list, struct evsel, core.node);
1788 	__perf_evlist__set_leader(list, &leader->core);
1789 	zfree(&leader->group_name);
1790 	leader->group_name = name;
1791 }
1792 
1793 static int parse_events__modifier_list(struct parse_events_state *parse_state,
1794 				       YYLTYPE *loc,
1795 				       struct list_head *list,
1796 				       struct parse_events_modifier mod,
1797 				       bool group)
1798 {
1799 	struct evsel *evsel;
1800 
1801 	if (!group && mod.weak) {
1802 		parse_events_error__handle(parse_state->error, loc->first_column,
1803 					   strdup("Weak modifier is for use with groups"), NULL);
1804 		return -EINVAL;
1805 	}
1806 
1807 	__evlist__for_each_entry(list, evsel) {
1808 		/* Translate modifiers into the equivalent evsel excludes. */
1809 		int eu = group ? evsel->core.attr.exclude_user : 0;
1810 		int ek = group ? evsel->core.attr.exclude_kernel : 0;
1811 		int eh = group ? evsel->core.attr.exclude_hv : 0;
1812 		int eH = group ? evsel->core.attr.exclude_host : 0;
1813 		int eG = group ? evsel->core.attr.exclude_guest : 0;
1814 		int exclude = eu | ek | eh;
1815 		int exclude_GH = eG | eH;
1816 
1817 		if (mod.user) {
1818 			if (!exclude)
1819 				exclude = eu = ek = eh = 1;
1820 			eu = 0;
1821 		}
1822 		if (mod.kernel) {
1823 			if (!exclude)
1824 				exclude = eu = ek = eh = 1;
1825 			ek = 0;
1826 		}
1827 		if (mod.hypervisor) {
1828 			if (!exclude)
1829 				exclude = eu = ek = eh = 1;
1830 			eh = 0;
1831 		}
1832 		if (mod.guest) {
1833 			if (!exclude_GH)
1834 				exclude_GH = eG = eH = 1;
1835 			eG = 0;
1836 		}
1837 		if (mod.host) {
1838 			if (!exclude_GH)
1839 				exclude_GH = eG = eH = 1;
1840 			eH = 0;
1841 		}
1842 		if (!exclude_GH && exclude_GH_default) {
1843 			if (perf_host)
1844 				eG = 1;
1845 			else if (perf_guest)
1846 				eH = 1;
1847 		}
1848 
1849 		evsel->core.attr.exclude_user   = eu;
1850 		evsel->core.attr.exclude_kernel = ek;
1851 		evsel->core.attr.exclude_hv     = eh;
1852 		evsel->core.attr.exclude_host   = eH;
1853 		evsel->core.attr.exclude_guest  = eG;
1854 		evsel->exclude_GH               = exclude_GH;
1855 
1856 		/* Simple modifiers copied to the evsel. */
1857 		if (mod.precise) {
1858 			u8 precise = evsel->core.attr.precise_ip + mod.precise;
1859 			/*
1860 			 * precise ip:
1861 			 *
1862 			 *  0 - SAMPLE_IP can have arbitrary skid
1863 			 *  1 - SAMPLE_IP must have constant skid
1864 			 *  2 - SAMPLE_IP requested to have 0 skid
1865 			 *  3 - SAMPLE_IP must have 0 skid
1866 			 *
1867 			 *  See also PERF_RECORD_MISC_EXACT_IP
1868 			 */
1869 			if (precise > 3) {
1870 				char *help;
1871 
1872 				if (asprintf(&help,
1873 					     "Maximum combined precise value is 3, adding precision to \"%s\"",
1874 					     evsel__name(evsel)) > 0) {
1875 					parse_events_error__handle(parse_state->error,
1876 								   loc->first_column,
1877 								   help, NULL);
1878 				}
1879 				return -EINVAL;
1880 			}
1881 			evsel->core.attr.precise_ip = precise;
1882 		}
1883 		if (mod.precise_max)
1884 			evsel->precise_max = 1;
1885 		if (mod.non_idle)
1886 			evsel->core.attr.exclude_idle = 1;
1887 		if (mod.sample_read)
1888 			evsel->sample_read = 1;
1889 		if (mod.pinned && evsel__is_group_leader(evsel))
1890 			evsel->core.attr.pinned = 1;
1891 		if (mod.exclusive && evsel__is_group_leader(evsel))
1892 			evsel->core.attr.exclusive = 1;
1893 		if (mod.weak)
1894 			evsel->weak_group = true;
1895 		if (mod.bpf)
1896 			evsel->bpf_counter = true;
1897 		if (mod.retire_lat)
1898 			evsel->retire_lat = true;
1899 	}
1900 	return 0;
1901 }
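
/*
 * Worked example of the modifier translation above: for "cycles:u" only
 * mod.user is set, so exclude starts as 0, all three exclude bits are set
 * and exclude_user is then cleared again, leaving exclude_kernel and
 * exclude_hv as 1. For "cycles:pp", mod.precise is 2 and precise_ip becomes
 * 2, requesting 0 skid.
 */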
1902 
1903 int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
1904 				 struct list_head *list,
1905 				 struct parse_events_modifier mod)
1906 {
1907 	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
1908 }
1909 
1910 int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
1911 				 struct list_head *list,
1912 				 struct parse_events_modifier mod)
1913 {
1914 	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
1915 }
1916 
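/*
 * Give each unnamed evsel on the list the default name, taking ownership of
 * name: the first unnamed evsel takes name itself, later ones get a copy,
 * and name is freed if no evsel used it.
 */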
1917 int parse_events__set_default_name(struct list_head *list, char *name)
1918 {
1919 	struct evsel *evsel;
1920 	bool used_name = false;
1921 
1922 	__evlist__for_each_entry(list, evsel) {
1923 		if (!evsel->name) {
1924 			evsel->name = used_name ? strdup(name) : name;
1925 			used_name = true;
1926 			if (!evsel->name)
1927 				return -ENOMEM;
1928 		}
1929 	}
1930 	if (!used_name)
1931 		free(name);
1932 	return 0;
1933 }
1934 
1935 static int parse_events__scanner(const char *str,
1936 				 FILE *input,
1937 				 struct parse_events_state *parse_state)
1938 {
1939 	YY_BUFFER_STATE buffer;
1940 	void *scanner;
1941 	int ret;
1942 
1943 	ret = parse_events_lex_init_extra(parse_state, &scanner);
1944 	if (ret)
1945 		return ret;
1946 
1947 	if (str)
1948 		buffer = parse_events__scan_string(str, scanner);
1949 	else
1950 		parse_events_set_in(input, scanner);
1951 
1952 #ifdef PARSER_DEBUG
1953 	parse_events_debug = 1;
1954 	parse_events_set_debug(1, scanner);
1955 #endif
1956 	ret = parse_events_parse(parse_state, scanner);
1957 
1958 	if (str) {
1959 		parse_events__flush_buffer(buffer, scanner);
1960 		parse_events__delete_buffer(buffer, scanner);
1961 	}
1962 	parse_events_lex_destroy(scanner);
1963 	return ret;
1964 }
1965 
1966 /*
1967  * Parse an event config string and return a list of event terms.
1968  */
1969 int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
1970 {
1971 	struct parse_events_state parse_state = {
1972 		.terms  = NULL,
1973 		.stoken = PE_START_TERMS,
1974 	};
1975 	int ret;
1976 
1977 	ret = parse_events__scanner(str, input, &parse_state);
1978 	if (!ret)
1979 		list_splice(&parse_state.terms->terms, &terms->terms);
1980 
1981 	zfree(&parse_state.terms);
1982 	return ret;
1983 }
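
/*
 * Usage sketch for parse_events_terms(), with a hypothetical caller and term
 * string:
 *
 *	struct parse_events_terms terms;
 *	struct parse_events_term *term;
 *
 *	parse_events_terms__init(&terms);
 *	if (!parse_events_terms(&terms, "config=0x10,period=1000", NULL)) {
 *		list_for_each_entry(term, &terms.terms, list)
 *			... inspect term->type_term and term->val ...
 *	}
 *	parse_events_terms__exit(&terms);
 */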
1984 
1985 static int evsel__compute_group_pmu_name(struct evsel *evsel,
1986 					  const struct list_head *head)
1987 {
1988 	struct evsel *leader = evsel__leader(evsel);
1989 	struct evsel *pos;
1990 	const char *group_pmu_name;
1991 	struct perf_pmu *pmu = evsel__find_pmu(evsel);
1992 
1993 	if (!pmu) {
1994 		/*
1995 		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
1996 		 * is a core PMU, but on heterogeneous systems which core PMU is
1997 		 * meant is unknown. For now pick the first core PMU.
1998 		 */
1999 		pmu = perf_pmus__scan_core(NULL);
2000 	}
2001 	if (!pmu) {
2002 		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
2003 		return -EINVAL;
2004 	}
2005 	group_pmu_name = pmu->name;
2006 	/*
2007 	 * Software events may be in a group with events from other PMUs, such as
2008 	 * uncore PMUs. Use the pmu_name of the first non-software event to avoid
2009 	 * breaking the software event out of the group.
2010 	 *
2011 	 * Aux event leaders, like intel_pt, expect a group with events from
2012 	 * other PMUs, so substitute the AUX event's PMU in this case.
2013 	 */
2014 	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
2015 		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);
2016 
2017 		if (!leader_pmu) {
2018 			/* As with determining pmu above. */
2019 			leader_pmu = perf_pmus__scan_core(NULL);
2020 		}
2021 		/*
2022 		 * Starting with the leader, find the first event with a named
2023 		 * non-software PMU. for_each_group_(member|evsel) isn't used as
2024 		 * the list isn't yet sorted to put evsels in the same group
2025 		 * together.
2026 		 */
2027 		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
2028 			group_pmu_name = leader_pmu->name;
2029 		} else if (leader->core.nr_members > 1) {
2030 			list_for_each_entry(pos, head, core.node) {
2031 				struct perf_pmu *pos_pmu;
2032 
2033 				if (pos == leader || evsel__leader(pos) != leader)
2034 					continue;
2035 				pos_pmu = evsel__find_pmu(pos);
2036 				if (!pos_pmu) {
2037 					/* As with determining pmu above. */
2038 					pos_pmu = perf_pmus__scan_core(NULL);
2039 				}
2040 				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
2041 					group_pmu_name = pos_pmu->name;
2042 					break;
2043 				}
2044 			}
2045 		}
2046 	}
2047 	/* Record computed name. */
2048 	evsel->group_pmu_name = strdup(group_pmu_name);
2049 	return evsel->group_pmu_name ? 0 : -ENOMEM;
2050 }
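
/*
 * Example of the substitution above: in a hypothetical group such as
 * "{uncore_imc/data_reads/,dummy}", the software dummy event adopts the
 * uncore PMU's name as its group_pmu_name, so the later sorting does not
 * break it out of the group.
 */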
2051 
2052 __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
2053 {
2054 	/* Order by insertion index. */
2055 	return lhs->core.idx - rhs->core.idx;
2056 }
2057 
2058 static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
2059 {
2060 	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
2061 	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
2062 	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
2063 	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
2064 	int *force_grouped_idx = _fg_idx;
2065 	int lhs_sort_idx, rhs_sort_idx, ret;
2066 	const char *lhs_pmu_name, *rhs_pmu_name;
2067 
2068 	/*
2069 	 * Get the indexes of the 2 events to sort. If the events are
2070 	 * in groups then the leader's index is used, otherwise the
2071 	 * event's index is used. An index may be forced for events that
2072 	 * must be in the same group, namely Intel topdown events.
2073 	 */
2074 	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
2075 		lhs_sort_idx = *force_grouped_idx;
2076 	} else {
2077 		bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;
2078 
2079 		lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
2080 	}
2081 	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
2082 		rhs_sort_idx = *force_grouped_idx;
2083 	} else {
2084 		bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;
2085 
2086 		rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
2087 	}
2088 
2089 	/* If the indices differ then respect the insertion order. */
2090 	if (lhs_sort_idx != rhs_sort_idx)
2091 		return lhs_sort_idx - rhs_sort_idx;
2092 
2093 	/*
2094 	 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should
2095 	 * be in the same group. Events in the same group need to be ordered by
2096 	 * their grouping PMU name as the group will be broken to ensure only
2097 	 * events on the same PMU are programmed together.
2098 	 *
2099 	 * With forcing, lhs_sort_idx == rhs_sort_idx shows that one or both
2100 	 * events are being forced to be at force_grouped_idx. If only one event
2101 	 * is being forced then the other event is the group leader of the group
2102 	 * we're trying to force the event into. Ensure for the force grouped
2103 	 * case that the PMU name ordering is also respected.
2104 	 */
2105 	lhs_pmu_name = lhs->group_pmu_name;
2106 	rhs_pmu_name = rhs->group_pmu_name;
2107 	ret = strcmp(lhs_pmu_name, rhs_pmu_name);
2108 	if (ret)
2109 		return ret;
2110 
2111 	/*
2112 	 * Architecture specific sorting, by default sort events in the same
2113 	 * group with the same PMU by their insertion index. On Intel topdown
2114 	 * constraints must be adhered to - slots first, etc.
2115 	 */
2116 	return arch_evlist__cmp(lhs, rhs);
2117 }
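
/*
 * In summary, evlist__cmp() sorts on a three part key: the (possibly forced)
 * insertion index of the event or its leader, then the group PMU name, then
 * the architecture specific comparison.
 */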
2118 
2119 int __weak arch_evlist__add_required_events(struct list_head *list __always_unused)
2120 {
2121 	return 0;
2122 }
2123 
2124 static int parse_events__sort_events_and_fix_groups(struct list_head *list)
2125 {
2126 	int idx = 0, force_grouped_idx = -1;
2127 	struct evsel *pos, *cur_leader = NULL;
2128 	struct perf_evsel *cur_leaders_grp = NULL;
2129 	bool idx_changed = false;
2130 	int orig_num_leaders = 0, num_leaders = 0;
2131 	int ret;
2132 	struct evsel *force_grouped_leader = NULL;
2133 	bool last_event_was_forced_leader = false;
2134 
2135 	/* On x86 topdown metrics events require a slots event. */
2136 	ret = arch_evlist__add_required_events(list);
2137 	if (ret)
2138 		return ret;
2139 
2140 	/*
2141 	 * First pass: compute each event's group PMU name, renumber the events
2142 	 * sequentially and note the index at which to sort forced grouped events.
2143 	 */
2144 	list_for_each_entry(pos, list, core.node) {
2145 		const struct evsel *pos_leader = evsel__leader(pos);
2146 
2147 		ret = evsel__compute_group_pmu_name(pos, list);
2148 		if (ret)
2149 			return ret;
2150 
2151 		if (pos == pos_leader)
2152 			orig_num_leaders++;
2153 
2154 		/*
2155 		 * Ensure indexes are sequential, in particular for multiple
2156 		 * event lists being merged. The indexes are used to detect when
2157 		 * the user order is modified.
2158 		 */
2159 		pos->core.idx = idx++;
2160 
2161 		/*
2162 		 * Remember an index at which to sort all forced grouped
2163 		 * events together. Use the group leader's index as some
2164 		 * events must appear first within the group.
2165 		 */
2166 		if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
2167 			force_grouped_idx = pos_leader->core.idx;
2168 	}
2169 
2170 	/* Sort events. */
2171 	list_sort(&force_grouped_idx, list, evlist__cmp);
2172 
2173 	/*
2174 	 * Recompute groups, splitting for PMUs and adding groups for events
2175 	 * that require them.
2176 	 */
2177 	idx = 0;
2178 	list_for_each_entry(pos, list, core.node) {
2179 		const struct evsel *pos_leader = evsel__leader(pos);
2180 		const char *pos_pmu_name = pos->group_pmu_name;
2181 		const char *cur_leader_pmu_name;
2182 		bool pos_force_grouped = force_grouped_idx != -1 &&
2183 			arch_evsel__must_be_in_group(pos);
2184 
2185 		/* Reset index and nr_members. */
2186 		if (pos->core.idx != idx)
2187 			idx_changed = true;
2188 		pos->core.idx = idx++;
2189 		pos->core.nr_members = 0;
2190 
2191 		/*
2192 		 * Set the group leader respecting the given groupings and that
2193 		 * groups can't span PMUs.
2194 		 */
2195 		if (!cur_leader) {
2196 			cur_leader = pos;
2197 			cur_leaders_grp = &pos->core;
2198 			if (pos_force_grouped)
2199 				force_grouped_leader = pos;
2200 		}
2201 
2202 		cur_leader_pmu_name = cur_leader->group_pmu_name;
2203 		if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
2204 			/* PMU changed so the group/leader must change. */
2205 			cur_leader = pos;
2206 			cur_leaders_grp = pos->core.leader;
2207 			if (pos_force_grouped && force_grouped_leader == NULL)
2208 				force_grouped_leader = pos;
2209 		} else if (cur_leaders_grp != pos->core.leader) {
2210 			bool split_even_if_last_leader_was_forced = true;
2211 
2212 			/*
2213 			 * Event is for a different group. If the last event was
2214 			 * the forced group leader then subsequent group events
2215 			 * and forced events should be in the same group. If
2216 			 * there are no other forced group events then the
2217 			 * forced group leader wasn't really being forced into a
2218 			 * group, it merely matched arch_evsel__must_be_in_group(), and
2219 			 * we don't want the group to split here.
2220 			 */
2221 			if (force_grouped_idx != -1 && last_event_was_forced_leader) {
2222 				struct evsel *pos2 = pos;
2223 				/*
2224 				 * Search the whole list as the group leaders
2225 				 * aren't currently valid.
2226 				 */
2227 				list_for_each_entry_continue(pos2, list, core.node) {
2228 					if (pos->core.leader == pos2->core.leader &&
2229 					    arch_evsel__must_be_in_group(pos2)) {
2230 						split_even_if_last_leader_was_forced = false;
2231 						break;
2232 					}
2233 				}
2234 			}
2235 			if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
2236 				if (pos_force_grouped) {
2237 					if (force_grouped_leader) {
2238 						cur_leader = force_grouped_leader;
2239 						cur_leaders_grp = force_grouped_leader->core.leader;
2240 					} else {
2241 						cur_leader = force_grouped_leader = pos;
2242 						cur_leaders_grp = &pos->core;
2243 					}
2244 				} else {
2245 					cur_leader = pos;
2246 					cur_leaders_grp = pos->core.leader;
2247 				}
2248 			}
2249 		}
2250 		if (pos_leader != cur_leader) {
2251 			/* The leader changed so update it. */
2252 			evsel__set_leader(pos, cur_leader);
2253 		}
2254 		last_event_was_forced_leader = (force_grouped_leader == pos);
2255 	}
2256 	list_for_each_entry(pos, list, core.node) {
2257 		struct evsel *pos_leader = evsel__leader(pos);
2258 
2259 		if (pos == pos_leader)
2260 			num_leaders++;
2261 		pos_leader->core.nr_members++;
2262 	}
2263 	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
2264 }
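
/*
 * Regrouping example for the function above, assuming a hybrid system: a
 * user specified "{cycles,instructions}" expands over the cpu_core and
 * cpu_atom PMUs, and the single group is split into one group per PMU,
 * roughly "{cpu_core/cycles/,cpu_core/instructions/}" followed by the
 * cpu_atom equivalent. The non-zero return then drives the "events were
 * regrouped" warning in __parse_events().
 */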
2265 
2266 int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
2267 		   struct parse_events_error *err, bool fake_pmu,
2268 		   bool warn_if_reordered, bool fake_tp)
2269 {
2270 	struct parse_events_state parse_state = {
2271 		.list	  = LIST_HEAD_INIT(parse_state.list),
2272 		.idx	  = evlist->core.nr_entries,
2273 		.error	  = err,
2274 		.stoken	  = PE_START_EVENTS,
2275 		.fake_pmu = fake_pmu,
2276 		.fake_tp  = fake_tp,
2277 		.pmu_filter = pmu_filter,
2278 		.match_legacy_cache_terms = true,
2279 	};
2280 	int ret, ret2;
2281 
2282 	ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);
2283 
2284 	if (!ret && list_empty(&parse_state.list)) {
2285 		WARN_ONCE(true, "WARNING: event parser found nothing\n");
2286 		return -1;
2287 	}
2288 
2289 	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
2290 	if (ret2 < 0)
2291 		return ret ? ret : ret2;
2292 
2293 	/*
2294 	 * Add list to the evlist even with errors to allow callers to clean up.
2295 	 */
2296 	evlist__splice_list_tail(evlist, &parse_state.list);
2297 
2298 	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
2299 		pr_warning("WARNING: events were regrouped to match PMUs\n");
2300 
2301 		if (verbose > 0) {
2302 			struct strbuf sb = STRBUF_INIT;
2303 
2304 			evlist__uniquify_evsel_names(evlist, &stat_config);
2305 			evlist__format_evsels(evlist, &sb, 2048);
2306 			pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
2307 			strbuf_release(&sb);
2308 		}
2309 	}
2310 	if (!ret) {
2311 		struct evsel *last;
2312 
2313 		last = evlist__last(evlist);
2314 		last->cmdline_group_boundary = true;
2315 
2316 		return 0;
2317 	}
2318 
2319 	/*
2320 	 * There are 2 users - builtin-record and builtin-test objects.
2321 	 * Both call evlist__delete() in case of error, so we don't
2322 	 * need to bother.
2323 	 */
2324 	return ret;
2325 }
2326 
2327 int parse_event(struct evlist *evlist, const char *str)
2328 {
2329 	struct parse_events_error err;
2330 	int ret;
2331 
2332 	parse_events_error__init(&err);
2333 	ret = parse_events(evlist, str, &err);
2334 	parse_events_error__exit(&err);
2335 	return ret;
2336 }
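
/*
 * Minimal usage sketch for parse_event(), with a hypothetical caller:
 *
 *	struct evlist *evlist = evlist__new();
 *
 *	if (evlist && !parse_event(evlist, "cycles:u"))
 *		... evlist now holds one user-space-only cycles evsel ...
 *	evlist__delete(evlist);
 */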
2337 
2338 struct parse_events_error_entry {
2339 	/** @list: The list the error is part of. */
2340 	struct list_head list;
2341 	/** @idx: index in the parsed string */
2342 	int   idx;
2343 	/** @str: string to display at the index */
2344 	char *str;
2345 	/** @help: optional help string */
2346 	char *help;
2347 };
2348 
2349 void parse_events_error__init(struct parse_events_error *err)
2350 {
2351 	INIT_LIST_HEAD(&err->list);
2352 }
2353 
2354 void parse_events_error__exit(struct parse_events_error *err)
2355 {
2356 	struct parse_events_error_entry *pos, *tmp;
2357 
2358 	list_for_each_entry_safe(pos, tmp, &err->list, list) {
2359 		zfree(&pos->str);
2360 		zfree(&pos->help);
2361 		list_del_init(&pos->list);
2362 		free(pos);
2363 	}
2364 }
2365 
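/*
 * Queue an error for later printing. Ownership of both str and help passes
 * to the error list (or they are freed on the failure paths below), so
 * callers pass heap-allocated, e.g. strdup()ed, strings.
 */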
2366 void parse_events_error__handle(struct parse_events_error *err, int idx,
2367 				char *str, char *help)
2368 {
2369 	struct parse_events_error_entry *entry;
2370 
2371 	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
2372 		goto out_free;
2373 
2374 	entry = zalloc(sizeof(*entry));
2375 	if (!entry) {
2376 		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
2377 			str, help ?: "<no help>");
2378 		goto out_free;
2379 	}
2380 	entry->idx = idx;
2381 	entry->str = str;
2382 	entry->help = help;
2383 	list_add(&entry->list, &err->list);
2384 	return;
2385 out_free:
2386 	free(str);
2387 	free(help);
2388 }
2389 
2390 #define MAX_WIDTH 1000
2391 static int get_term_width(void)
2392 {
2393 	struct winsize ws;
2394 
2395 	get_term_dimensions(&ws);
2396 	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
2397 }
2398 
2399 static void __parse_events_error__print(int err_idx, const char *err_str,
2400 					const char *err_help, const char *event)
2401 {
2402 	const char *str = "invalid or unsupported event: ";
2403 	char _buf[MAX_WIDTH];
2404 	char *buf = (char *) event;
2405 	int idx = 0;
2406 	if (err_str) {
2407 		/* -2 for extra '' in the final fprintf */
2408 		int width       = get_term_width() - 2;
2409 		int len_event   = strlen(event);
2410 		int len_str, max_len, cut = 0;
2411 
2412 		/*
2413 		 * Maximum error index indent; we will cut
2414 		 * the event string if it's bigger.
2415 		 */
2416 		int max_err_idx = 13;
2417 
2418 		/*
2419 		 * Let's be specific with the message when
2420 		 * we have the precise error.
2421 		 */
2422 		str     = "event syntax error: ";
2423 		len_str = strlen(str);
2424 		max_len = width - len_str;
2425 
2426 		buf = _buf;
2427 
2428 		/* We're cutting from the beginning. */
2429 		if (err_idx > max_err_idx)
2430 			cut = err_idx - max_err_idx;
2431 
2432 		strncpy(buf, event + cut, max_len);
2433 		buf[max_len] = 0; /* strncpy() won't NUL terminate when truncating */
2434 
2435 		/* Mark cut parts with '..' on both sides. */
2436 		if (cut)
2437 			buf[0] = buf[1] = '.';
2438 
2439 		if ((len_event - cut) > max_len)
2440 			buf[max_len - 1] = buf[max_len - 2] = '.';
2442 
2443 		idx = len_str + err_idx - cut;
2444 	}
2445 
2446 	fprintf(stderr, "%s'%s'\n", str, buf);
2447 	if (idx) {
2448 		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
2449 		if (err_help)
2450 			fprintf(stderr, "\n%s\n", err_help);
2451 	}
2452 }
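
/*
 * Output sketch for a hypothetical bad string "cycles:x" with the error
 * reported at index 7; the marker lines up under the offending character:
 *
 *	event syntax error: 'cycles:x'
 *	                            \___ <error string>
 */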
2453 
2454 void parse_events_error__print(const struct parse_events_error *err,
2455 			       const char *event)
2456 {
2457 	struct parse_events_error_entry *pos;
2458 	bool first = true;
2459 
2460 	list_for_each_entry(pos, &err->list, list) {
2461 		if (!first)
2462 			fputs("\n", stderr);
2463 		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
2464 		first = false;
2465 	}
2466 }
2467 
2468 /*
2469  * In the list of errors err, do any of the error strings (str) contain the
2470  * given needle string?
2471  */
2472 bool parse_events_error__contains(const struct parse_events_error *err,
2473 				  const char *needle)
2474 {
2475 	struct parse_events_error_entry *pos;
2476 
2477 	list_for_each_entry(pos, &err->list, list) {
2478 		if (strstr(pos->str, needle) != NULL)
2479 			return true;
2480 	}
2481 	return false;
2482 }
2483 
2484 #undef MAX_WIDTH
2485 
2486 int parse_events_option(const struct option *opt, const char *str,
2487 			int unset __maybe_unused)
2488 {
2489 	struct parse_events_option_args *args = opt->value;
2490 	struct parse_events_error err;
2491 	int ret;
2492 
2493 	parse_events_error__init(&err);
2494 	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
2495 			     /*fake_pmu=*/false, /*warn_if_reordered=*/true,
2496 			     /*fake_tp=*/false);
2497 
2498 	if (ret) {
2499 		parse_events_error__print(&err, str);
2500 		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
2501 	}
2502 	parse_events_error__exit(&err);
2503 
2504 	return ret;
2505 }
2506 
2507 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
2508 {
2509 	struct parse_events_option_args *args = opt->value;
2510 	int ret;
2511 
2512 	if (*args->evlistp == NULL) {
2513 		*args->evlistp = evlist__new();
2514 
2515 		if (*args->evlistp == NULL) {
2516 			fprintf(stderr, "Not enough memory to create evlist\n");
2517 			return -1;
2518 		}
2519 	}
2520 	ret = parse_events_option(opt, str, unset);
2521 	if (ret) {
2522 		evlist__delete(*args->evlistp);
2523 		*args->evlistp = NULL;
2524 	}
2525 
2526 	return ret;
2527 }
2528 
2529 static int
2530 foreach_evsel_in_last_glob(struct evlist *evlist,
2531 			   int (*func)(struct evsel *evsel,
2532 				       const void *arg),
2533 			   const void *arg)
2534 {
2535 	struct evsel *last = NULL;
2536 	int err;
2537 
2538 	/*
2539 	 * Don't return when the list is empty; give func a chance to report
2540 	 * an error when it finds last == NULL.
2541 	 *
2542 	 * So no need to WARN here, let *func do this.
2543 	 */
2544 	if (evlist->core.nr_entries > 0)
2545 		last = evlist__last(evlist);
2546 
2547 	do {
2548 		err = (*func)(last, arg);
2549 		if (err)
2550 			return -1;
2551 		if (!last)
2552 			return 0;
2553 
2554 		if (last->core.node.prev == &evlist->core.entries)
2555 			return 0;
2556 		last = list_entry(last->core.node.prev, struct evsel, core.node);
2557 	} while (!last->cmdline_group_boundary);
2558 
2559 	return 0;
2560 }
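
/*
 * For example, with "-e sched:sched_* --filter 'comm != perf'" the callback
 * runs for every evsel created by the last -e string, walking backwards
 * until the previous cmdline_group_boundary (or the list head) is reached.
 */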
2561 
2562 /* Will a tracepoint filter work for str or should a BPF filter be used? */
2563 static bool is_possible_tp_filter(const char *str)
2564 {
2565 	return strstr(str, "uid") == NULL;
2566 }
2567 
2568 static int set_filter(struct evsel *evsel, const void *arg)
2569 {
2570 	const char *str = arg;
2571 	int nr_addr_filters = 0;
2572 	struct perf_pmu *pmu;
2573 
2574 	if (evsel == NULL) {
2575 		fprintf(stderr,
2576 			"--filter option should follow a -e tracepoint or HW tracer option\n");
2577 		return -1;
2578 	}
2579 
2580 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) {
2581 		if (evsel__append_tp_filter(evsel, str) < 0) {
2582 			fprintf(stderr,
2583 				"not enough memory to hold filter string\n");
2584 			return -1;
2585 		}
2586 
2587 		return 0;
2588 	}
2589 
2590 	pmu = evsel__find_pmu(evsel);
2591 	if (pmu) {
2592 		perf_pmu__scan_file(pmu, "nr_addr_filters",
2593 				    "%d", &nr_addr_filters);
2594 	}
2595 	if (!nr_addr_filters)
2596 		return perf_bpf_filter__parse(&evsel->bpf_filters, str);
2597 
2598 	if (evsel__append_addr_filter(evsel, str) < 0) {
2599 		fprintf(stderr,
2600 			"not enough memory to hold filter string\n");
2601 		return -1;
2602 	}
2603 
2604 	return 0;
2605 }
2606 
2607 int parse_filter(const struct option *opt, const char *str,
2608 		 int unset __maybe_unused)
2609 {
2610 	struct evlist *evlist = *(struct evlist **)opt->value;
2611 
2612 	return foreach_evsel_in_last_glob(evlist, set_filter,
2613 					  (const void *)str);
2614 }
2615 
2616 int parse_uid_filter(struct evlist *evlist, uid_t uid)
2617 {
2618 	struct option opt = {
2619 		.value = &evlist,
2620 	};
2621 	char buf[128];
2622 	int ret;
2623 
2624 	snprintf(buf, sizeof(buf), "uid == %u", (unsigned int)uid);
2625 	ret = parse_filter(&opt, buf, /*unset=*/0);
2626 	if (ret) {
2627 		if (use_browser >= 1) {
2628 			/*
2629 			 * Use ui__warning so a pop up appears above the
2630 			 * underlying BPF error message.
2631 			 */
2632 			ui__warning("Failed to add UID filtering that uses BPF filtering.\n");
2633 		} else {
2634 			fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n");
2635 		}
2636 	}
2637 	return ret;
2638 }
2639 
2640 static int add_exclude_perf_filter(struct evsel *evsel,
2641 				   const void *arg __maybe_unused)
2642 {
2643 	char new_filter[64];
2644 
2645 	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2646 		fprintf(stderr,
2647 			"--exclude-perf option should follow a -e tracepoint option\n");
2648 		return -1;
2649 	}
2650 
2651 	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
2652 
2653 	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
2654 		fprintf(stderr,
2655 			"not enough memory to hold filter string\n");
2656 		return -1;
2657 	}
2658 
2659 	return 0;
2660 }
2661 
2662 int exclude_perf(const struct option *opt,
2663 		 const char *arg __maybe_unused,
2664 		 int unset __maybe_unused)
2665 {
2666 	struct evlist *evlist = *(struct evlist **)opt->value;
2667 
2668 	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
2669 					  NULL);
2670 }
2671 
2672 int parse_events__is_hardcoded_term(struct parse_events_term *term)
2673 {
2674 	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
2675 }
2676 
2677 static int new_term(struct parse_events_term **_term,
2678 		    struct parse_events_term *temp,
2679 		    char *str, u64 num)
2680 {
2681 	struct parse_events_term *term;
2682 
2683 	term = malloc(sizeof(*term));
2684 	if (!term)
2685 		return -ENOMEM;
2686 
2687 	*term = *temp;
2688 	INIT_LIST_HEAD(&term->list);
2689 	term->weak = false;
2690 
2691 	switch (term->type_val) {
2692 	case PARSE_EVENTS__TERM_TYPE_NUM:
2693 		term->val.num = num;
2694 		break;
2695 	case PARSE_EVENTS__TERM_TYPE_STR:
2696 		term->val.str = str;
2697 		break;
2698 	default:
2699 		free(term);
2700 		return -EINVAL;
2701 	}
2702 
2703 	*_term = term;
2704 	return 0;
2705 }
2706 
2707 int parse_events_term__num(struct parse_events_term **term,
2708 			   enum parse_events__term_type type_term,
2709 			   const char *config, u64 num,
2710 			   bool no_value,
2711 			   void *loc_term_, void *loc_val_)
2712 {
2713 	YYLTYPE *loc_term = loc_term_;
2714 	YYLTYPE *loc_val = loc_val_;
2715 
2716 	struct parse_events_term temp = {
2717 		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
2718 		.type_term = type_term,
2719 		.config    = config ? : strdup(parse_events__term_type_str(type_term)),
2720 		.no_value  = no_value,
2721 		.err_term  = loc_term ? loc_term->first_column : 0,
2722 		.err_val   = loc_val  ? loc_val->first_column  : 0,
2723 	};
2724 
2725 	return new_term(term, &temp, /*str=*/NULL, num);
2726 }
2727 
2728 int parse_events_term__str(struct parse_events_term **term,
2729 			   enum parse_events__term_type type_term,
2730 			   char *config, char *str,
2731 			   void *loc_term_, void *loc_val_)
2732 {
2733 	YYLTYPE *loc_term = loc_term_;
2734 	YYLTYPE *loc_val = loc_val_;
2735 
2736 	struct parse_events_term temp = {
2737 		.type_val  = PARSE_EVENTS__TERM_TYPE_STR,
2738 		.type_term = type_term,
2739 		.config    = config,
2740 		.err_term  = loc_term ? loc_term->first_column : 0,
2741 		.err_val   = loc_val  ? loc_val->first_column  : 0,
2742 	};
2743 
2744 	return new_term(term, &temp, str, /*num=*/0);
2745 }
2746 
2747 int parse_events_term__term(struct parse_events_term **term,
2748 			    enum parse_events__term_type term_lhs,
2749 			    enum parse_events__term_type term_rhs,
2750 			    void *loc_term, void *loc_val)
2751 {
2752 	return parse_events_term__str(term, term_lhs, NULL,
2753 				      strdup(parse_events__term_type_str(term_rhs)),
2754 				      loc_term, loc_val);
2755 }
2756 
2757 int parse_events_term__clone(struct parse_events_term **new,
2758 			     const struct parse_events_term *term)
2759 {
2760 	char *str;
2761 	struct parse_events_term temp = *term;
2762 
2763 	temp.used = false;
2764 	if (term->config) {
2765 		temp.config = strdup(term->config);
2766 		if (!temp.config)
2767 			return -ENOMEM;
2768 	}
2769 	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2770 		return new_term(new, &temp, /*str=*/NULL, term->val.num);
2771 
2772 	str = strdup(term->val.str);
2773 	if (!str) {
2774 		zfree(&temp.config);
2775 		return -ENOMEM;
2776 	}
2777 	return new_term(new, &temp, str, /*num=*/0);
2778 }
2779 
2780 void parse_events_term__delete(struct parse_events_term *term)
2781 {
2782 	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
2783 		zfree(&term->val.str);
2784 
2785 	zfree(&term->config);
2786 	free(term);
2787 }
2788 
2789 static int parse_events_terms__copy(const struct parse_events_terms *src,
2790 				    struct parse_events_terms *dest)
2791 {
2792 	struct parse_events_term *term;
2793 
2794 	list_for_each_entry (term, &src->terms, list) {
2795 		struct parse_events_term *n;
2796 		int ret;
2797 
2798 		ret = parse_events_term__clone(&n, term);
2799 		if (ret)
2800 			return ret;
2801 
2802 		list_add_tail(&n->list, &dest->terms);
2803 	}
2804 	return 0;
2805 }
2806 
2807 void parse_events_terms__init(struct parse_events_terms *terms)
2808 {
2809 	INIT_LIST_HEAD(&terms->terms);
2810 }
2811 
2812 void parse_events_terms__exit(struct parse_events_terms *terms)
2813 {
2814 	struct parse_events_term *term, *h;
2815 
2816 	list_for_each_entry_safe(term, h, &terms->terms, list) {
2817 		list_del_init(&term->list);
2818 		parse_events_term__delete(term);
2819 	}
2820 }
2821 
2822 void parse_events_terms__delete(struct parse_events_terms *terms)
2823 {
2824 	if (!terms)
2825 		return;
2826 	parse_events_terms__exit(terms);
2827 	free(terms);
2828 }
2829 
2830 int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
2831 {
2832 	struct parse_events_term *term;
2833 	bool first = true;
2834 
2835 	if (!terms)
2836 		return 0;
2837 
2838 	list_for_each_entry(term, &terms->terms, list) {
2839 		int ret;
2840 
2841 		if (!first) {
2842 			ret = strbuf_addch(sb, ',');
2843 			if (ret < 0)
2844 				return ret;
2845 		}
2846 		first = false;
2847 
2848 		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
2849 			if (term->no_value) {
2850 				assert(term->val.num == 1);
2851 				ret = strbuf_addf(sb, "%s", term->config);
2852 			} else
2853 				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
2854 		} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
2855 			if (term->config) {
2856 				ret = strbuf_addf(sb, "%s=", term->config);
2857 				if (ret < 0)
2858 					return ret;
2859 			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
2860 				ret = strbuf_addf(sb, "%s=",
2861 						  parse_events__term_type_str(term->type_term));
2862 				if (ret < 0)
2863 					return ret;
2864 			}
2865 			assert(!term->no_value);
2866 			ret = strbuf_addf(sb, "%s", term->val.str);
2867 		}
2868 		if (ret < 0)
2869 			return ret;
2870 	}
2871 	return 0;
2872 }
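
/*
 * For example, a terms list built from "period=1000,edge" round-trips as
 * "period=0x3e8,edge": numeric values print in hex, and a no_value term
 * prints just its name.
 */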
2873 
2874 static void config_terms_list(char *buf, size_t buf_sz)
2875 {
2876 	int i;
2877 	bool first = true;
2878 
2879 	buf[0] = '\0';
2880 	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
2881 		const char *name = parse_events__term_type_str(i);
2882 
2883 		if (!config_term_avail(i, NULL))
2884 			continue;
2885 		if (!name)
2886 			continue;
2887 		if (name[0] == '<')
2888 			continue;
2889 
2890 		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
2891 			return;
2892 
2893 		if (!first)
2894 			strcat(buf, ",");
2895 		else
2896 			first = false;
2897 		strcat(buf, name);
2898 	}
2899 }
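
/*
 * For example, the list built above might read
 * "config,config1,config2,name,period,freq,..." with the exact contents
 * depending on config_term_avail() and which term names are user visible.
 */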
2900 
2901 /*
2902  * Return a string containing the valid config terms of an event.
2903  * @additional_terms: For terms such as PMU sysfs terms.
2904  */
2905 char *parse_events_formats_error_string(char *additional_terms)
2906 {
2907 	char *str;
2908 	/* "no-overwrite" is the longest name */
2909 	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
2910 			  (sizeof("no-overwrite") - 1)];
2911 
2912 	config_terms_list(static_terms, sizeof(static_terms));
2913 	/* valid terms */
2914 	if (additional_terms) {
2915 		if (asprintf(&str, "valid terms: %s,%s",
2916 			     additional_terms, static_terms) < 0)
2917 			goto fail;
2918 	} else {
2919 		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
2920 			goto fail;
2921 	}
2922 	return str;
2923 
2924 fail:
2925 	return NULL;
2926 }
2927