1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/hw_breakpoint.h>
3 #include <linux/err.h>
4 #include <linux/list_sort.h>
5 #include <linux/zalloc.h>
6 #include <dirent.h>
7 #include <errno.h>
8 #include <sys/ioctl.h>
9 #include <sys/param.h>
10 #include "cpumap.h"
11 #include "term.h"
12 #include "env.h"
13 #include "evlist.h"
14 #include "evsel.h"
15 #include <subcmd/parse-options.h>
16 #include "parse-events.h"
17 #include "string2.h"
18 #include "strbuf.h"
19 #include "debug.h"
20 #include <perf/cpumap.h>
21 #include <util/parse-events-bison.h>
22 #include <util/parse-events-flex.h>
23 #include "pmu.h"
24 #include "pmus.h"
25 #include "tp_pmu.h"
26 #include "asm/bug.h"
27 #include "ui/ui.h"
28 #include "util/parse-branch-options.h"
29 #include "util/evsel_config.h"
30 #include "util/event.h"
31 #include "util/bpf-filter.h"
32 #include "util/stat.h"
33 #include "util/util.h"
34 #include "tracepoint.h"
35 #include <api/fs/tracing_path.h>
36
37 #define MAX_NAME_LEN 100
38
39 static int get_config_terms(const struct parse_events_terms *head_config,
40 struct list_head *head_terms);
41 static int parse_events_terms__copy(const struct parse_events_terms *src,
42 struct parse_events_terms *dest);
43
44 const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
45 [PERF_COUNT_HW_CPU_CYCLES] = {
46 .symbol = "cpu-cycles",
47 .alias = "cycles",
48 },
49 [PERF_COUNT_HW_INSTRUCTIONS] = {
50 .symbol = "instructions",
51 .alias = "",
52 },
53 [PERF_COUNT_HW_CACHE_REFERENCES] = {
54 .symbol = "cache-references",
55 .alias = "",
56 },
57 [PERF_COUNT_HW_CACHE_MISSES] = {
58 .symbol = "cache-misses",
59 .alias = "",
60 },
61 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
62 .symbol = "branch-instructions",
63 .alias = "branches",
64 },
65 [PERF_COUNT_HW_BRANCH_MISSES] = {
66 .symbol = "branch-misses",
67 .alias = "",
68 },
69 [PERF_COUNT_HW_BUS_CYCLES] = {
70 .symbol = "bus-cycles",
71 .alias = "",
72 },
73 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
74 .symbol = "stalled-cycles-frontend",
75 .alias = "idle-cycles-frontend",
76 },
77 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
78 .symbol = "stalled-cycles-backend",
79 .alias = "idle-cycles-backend",
80 },
81 [PERF_COUNT_HW_REF_CPU_CYCLES] = {
82 .symbol = "ref-cycles",
83 .alias = "",
84 },
85 };
86
87 static const char *const event_types[] = {
88 [PERF_TYPE_HARDWARE] = "hardware",
89 [PERF_TYPE_SOFTWARE] = "software",
90 [PERF_TYPE_TRACEPOINT] = "tracepoint",
91 [PERF_TYPE_HW_CACHE] = "hardware-cache",
92 [PERF_TYPE_RAW] = "raw",
93 [PERF_TYPE_BREAKPOINT] = "breakpoint",
94 };
95
96 const char *event_type(size_t type)
97 {
98 if (type >= PERF_TYPE_MAX)
99 return "unknown";
100
101 return event_types[type];
102 }
103
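/* Return the string value of the first term matching type_term, or NULL. */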
104 static char *get_config_str(const struct parse_events_terms *head_terms,
105 enum parse_events__term_type type_term)
106 {
107 struct parse_events_term *term;
108
109 if (!head_terms)
110 return NULL;
111
112 list_for_each_entry(term, &head_terms->terms, list)
113 if (term->type_term == type_term)
114 return term->val.str;
115
116 return NULL;
117 }
118
119 static char *get_config_metric_id(const struct parse_events_terms *head_terms)
120 {
121 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
122 }
123
124 static char *get_config_name(const struct parse_events_terms *head_terms)
125 {
126 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
127 }
128
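/*
 * Gather CPUs from any "cpu=" terms. A term may give a CPU number, a PMU name
 * (whose CPUs are used) or a CPU list string; all results are merged.
 */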
129 static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms)
130 {
131 struct parse_events_term *term;
132 struct perf_cpu_map *cpus = NULL;
133
134 if (!head_terms)
135 return NULL;
136
137 list_for_each_entry(term, &head_terms->terms, list) {
138 if (term->type_term == PARSE_EVENTS__TERM_TYPE_CPU) {
139 struct perf_cpu_map *term_cpus;
140
141 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
142 term_cpus = perf_cpu_map__new_int(term->val.num);
143 } else {
144 struct perf_pmu *pmu = perf_pmus__find(term->val.str);
145
146 if (pmu && perf_cpu_map__is_empty(pmu->cpus))
147 term_cpus = pmu->is_core ? cpu_map__online() : NULL;
148 else if (pmu)
149 term_cpus = perf_cpu_map__get(pmu->cpus);
150 else
151 term_cpus = perf_cpu_map__new(term->val.str);
152 }
153 perf_cpu_map__merge(&cpus, term_cpus);
154 perf_cpu_map__put(term_cpus);
155 }
156 }
157
158 return cpus;
159 }
160
161 /**
162 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
163 * matches the raw's string value. If the string value matches an
164 * event then change the term to be an event, if not then change it to
165 * be a config term. For example, "read" may be an event of the PMU or
166 * a raw hex encoding of 0xead. The fix-up is done late so the PMU of
167 * the event can be determined and we don't need to scan all PMUs
168 * ahead-of-time.
169 * @config_terms: the list of terms that may contain a raw term.
170 * @pmu: the PMU to scan for events from.
171 */
172 static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
173 {
174 struct parse_events_term *term;
175
176 list_for_each_entry(term, &config_terms->terms, list) {
177 u64 num;
178
179 if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
180 continue;
181
182 if (perf_pmu__have_event(pmu, term->val.str)) {
183 zfree(&term->config);
184 term->config = term->val.str;
185 term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
186 term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
187 term->val.num = 1;
188 term->no_value = true;
189 continue;
190 }
191
192 zfree(&term->config);
193 term->config = strdup("config");
194 errno = 0;
195 num = strtoull(term->val.str + 1, NULL, 16);
196 assert(errno == 0);
197 free(term->val.str);
198 term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
199 term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
200 term->val.num = num;
201 term->no_value = false;
202 }
203 }
204
205 static struct evsel *
206 __add_event(struct list_head *list, int *idx,
207 struct perf_event_attr *attr,
208 bool init_attr,
209 const char *name, const char *metric_id, struct perf_pmu *pmu,
210 struct list_head *config_terms, struct evsel *first_wildcard_match,
211 struct perf_cpu_map *user_cpus, u64 alternate_hw_config)
212 {
213 struct evsel *evsel;
214 bool is_pmu_core;
215 struct perf_cpu_map *cpus, *pmu_cpus;
216 bool has_user_cpus = !perf_cpu_map__is_empty(user_cpus);
217
218 /*
219 * Ensure the first_wildcard_match's PMU matches that of the new event
220 * being added. Otherwise try to match with another event further down
221 * the evlist.
222 */
223 if (first_wildcard_match) {
224 struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);
225
226 first_wildcard_match = NULL;
227 list_for_each_entry_continue(pos, list, core.node) {
228 if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
229 first_wildcard_match = pos;
230 break;
231 }
232 if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
233 first_wildcard_match = pos;
234 break;
235 }
236 }
237 }
238
239 if (pmu) {
240 perf_pmu__warn_invalid_formats(pmu);
241 if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
242 perf_pmu__warn_invalid_config(pmu, attr->config, name,
243 PERF_PMU_FORMAT_VALUE_CONFIG, "config");
244 perf_pmu__warn_invalid_config(pmu, attr->config1, name,
245 PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
246 perf_pmu__warn_invalid_config(pmu, attr->config2, name,
247 PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
248 perf_pmu__warn_invalid_config(pmu, attr->config3, name,
249 PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
250 }
251 }
252 /*
253 * If a PMU wasn't given, such as for legacy events, find one now so
254 * that warnings won't be generated.
255 */
256 if (!pmu)
257 pmu = perf_pmus__find_by_attr(attr);
258
259 if (pmu) {
260 is_pmu_core = pmu->is_core;
261 pmu_cpus = perf_cpu_map__get(pmu->cpus);
262 if (perf_cpu_map__is_empty(pmu_cpus))
263 pmu_cpus = cpu_map__online();
264 } else {
265 is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
266 attr->type == PERF_TYPE_HW_CACHE);
267 pmu_cpus = is_pmu_core ? cpu_map__online() : NULL;
268 }
269
270 if (has_user_cpus)
271 cpus = perf_cpu_map__get(user_cpus);
272 else
273 cpus = perf_cpu_map__get(pmu_cpus);
274
275 if (init_attr)
276 event_attr_init(attr);
277
278 evsel = evsel__new_idx(attr, *idx);
279 if (!evsel)
280 goto out_err;
281
282 if (name) {
283 evsel->name = strdup(name);
284 if (!evsel->name)
285 goto out_err;
286 }
287
288 if (metric_id) {
289 evsel->metric_id = strdup(metric_id);
290 if (!evsel->metric_id)
291 goto out_err;
292 }
293
294 (*idx)++;
295 evsel->core.cpus = cpus;
296 evsel->core.pmu_cpus = pmu_cpus;
297 evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
298 evsel->core.is_pmu_core = is_pmu_core;
299 evsel->pmu = pmu;
300 evsel->alternate_hw_config = alternate_hw_config;
301 evsel->first_wildcard_match = first_wildcard_match;
302
303 if (config_terms)
304 list_splice_init(config_terms, &evsel->config_terms);
305
306 if (list)
307 list_add_tail(&evsel->core.node, list);
308
309 if (has_user_cpus)
310 evsel__warn_user_requested_cpus(evsel, user_cpus);
311
312 return evsel;
313 out_err:
314 perf_cpu_map__put(cpus);
315 perf_cpu_map__put(pmu_cpus);
316 zfree(&evsel->name);
317 zfree(&evsel->metric_id);
318 free(evsel);
319 return NULL;
320 }
321
322 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
323 const char *name, const char *metric_id,
324 struct perf_pmu *pmu)
325 {
326 return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
327 metric_id, pmu, /*config_terms=*/NULL,
328 /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
329 /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
330 }
331
332 static int add_event(struct list_head *list, int *idx,
333 struct perf_event_attr *attr, const char *name,
334 const char *metric_id, struct list_head *config_terms,
335 u64 alternate_hw_config)
336 {
337 return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
338 /*pmu=*/NULL, config_terms,
339 /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
340 alternate_hw_config) ? 0 : -ENOMEM;
341 }
342
343 /**
344 * parse_aliases - search names for entries beginning or equalling str ignoring
345 * case. If multiple entries in names match str then the longest
346 * is chosen.
347 * @str: The needle to look for.
348 * @names: The haystack to search.
349 * @size: The size of the haystack.
350 * @longest: Out argument giving the length of the matching entry.
351 */
352 static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
353 int *longest)
354 {
355 *longest = -1;
356 for (int i = 0; i < size; i++) {
357 for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
358 int n = strlen(names[i][j]);
359
360 if (n > *longest && !strncasecmp(str, names[i][j], n))
361 *longest = n;
362 }
363 if (*longest > 0)
364 return i;
365 }
366
367 return -1;
368 }
369
370 typedef int config_term_func_t(struct perf_event_attr *attr,
371 struct parse_events_term *term,
372 struct parse_events_error *err);
373 static int config_term_common(struct perf_event_attr *attr,
374 struct parse_events_term *term,
375 struct parse_events_error *err);
376 static int config_attr(struct perf_event_attr *attr,
377 const struct parse_events_terms *head,
378 struct parse_events_error *err,
379 config_term_func_t config_term);
380
381 /**
382 * parse_events__decode_legacy_cache - Search name for the legacy cache event
383 * name composed of 1, 2 or 3 hyphen
384 * separated sections. The first section is
385 * the cache type while the others are the
386 * optional op and optional result. To make
387 * life hard the names in the table also
388 * contain hyphens and the longest name
389 * should always be selected.
390 */
391 int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
392 {
393 int len, cache_type = -1, cache_op = -1, cache_result = -1;
394 const char *name_end = &name[strlen(name) + 1];
395 const char *str = name;
396
397 cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
398 if (cache_type == -1)
399 return -EINVAL;
400 str += len + 1;
401
402 if (str < name_end) {
403 cache_op = parse_aliases(str, evsel__hw_cache_op,
404 PERF_COUNT_HW_CACHE_OP_MAX, &len);
405 if (cache_op >= 0) {
406 if (!evsel__is_cache_op_valid(cache_type, cache_op))
407 return -EINVAL;
408 str += len + 1;
409 } else {
410 cache_result = parse_aliases(str, evsel__hw_cache_result,
411 PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
412 if (cache_result >= 0)
413 str += len + 1;
414 }
415 }
416 if (str < name_end) {
417 if (cache_op < 0) {
418 cache_op = parse_aliases(str, evsel__hw_cache_op,
419 PERF_COUNT_HW_CACHE_OP_MAX, &len);
420 if (cache_op >= 0) {
421 if (!evsel__is_cache_op_valid(cache_type, cache_op))
422 return -EINVAL;
423 }
424 } else if (cache_result < 0) {
425 cache_result = parse_aliases(str, evsel__hw_cache_result,
426 PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
427 }
428 }
429
430 /*
431 * Fall back to reads:
432 */
433 if (cache_op == -1)
434 cache_op = PERF_COUNT_HW_CACHE_OP_READ;
435
436 /*
437 * Fall back to accesses:
438 */
439 if (cache_result == -1)
440 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
441
442 *config = cache_type | (cache_op << 8) | (cache_result << 16);
443 if (perf_pmus__supports_extended_type())
444 *config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
445 return 0;
446 }
447
448 /**
449 * parse_events__filter_pmu - returns false if a wildcard PMU should be
450 * considered, true if it should be filtered.
451 */
452 bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
453 const struct perf_pmu *pmu)
454 {
455 if (parse_state->pmu_filter == NULL)
456 return false;
457
458 return strcmp(parse_state->pmu_filter, pmu->name) != 0;
459 }
460
461 static int parse_events_add_pmu(struct parse_events_state *parse_state,
462 struct list_head *list, struct perf_pmu *pmu,
463 const struct parse_events_terms *const_parsed_terms,
464 struct evsel *first_wildcard_match, u64 alternate_hw_config);
465
466 int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
467 struct parse_events_state *parse_state,
468 struct parse_events_terms *parsed_terms)
469 {
470 struct perf_pmu *pmu = NULL;
471 bool found_supported = false;
472 const char *config_name = get_config_name(parsed_terms);
473 const char *metric_id = get_config_metric_id(parsed_terms);
474 struct perf_cpu_map *cpus = get_config_cpu(parsed_terms);
475 int ret = 0;
476 struct evsel *first_wildcard_match = NULL;
477
478 while ((pmu = perf_pmus__scan_for_event(pmu, name)) != NULL) {
479 LIST_HEAD(config_terms);
480 struct perf_event_attr attr;
481
482 if (parse_events__filter_pmu(parse_state, pmu))
483 continue;
484
485 if (perf_pmu__have_event(pmu, name)) {
486 /*
487 * The PMU has the event so add as not a legacy cache
488 * event.
489 */
490 ret = parse_events_add_pmu(parse_state, list, pmu,
491 parsed_terms,
492 first_wildcard_match,
493 /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
494 if (ret)
495 goto out_err;
496 if (first_wildcard_match == NULL)
497 first_wildcard_match =
498 container_of(list->prev, struct evsel, core.node);
499 continue;
500 }
501
502 if (!pmu->is_core) {
503 /* Legacy cache events are only supported by core PMUs. */
504 continue;
505 }
506
507 memset(&attr, 0, sizeof(attr));
508 attr.type = PERF_TYPE_HW_CACHE;
509
510 ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
511 if (ret)
512 return ret;
513
514 found_supported = true;
515
516 if (parsed_terms) {
517 if (config_attr(&attr, parsed_terms, parse_state->error,
518 config_term_common)) {
519 ret = -EINVAL;
520 goto out_err;
521 }
522 if (get_config_terms(parsed_terms, &config_terms)) {
523 ret = -ENOMEM;
524 goto out_err;
525 }
526 }
527
528 if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
529 metric_id, pmu, &config_terms, first_wildcard_match,
530 cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
531 ret = -ENOMEM;
532
533 if (first_wildcard_match == NULL)
534 first_wildcard_match = container_of(list->prev, struct evsel, core.node);
535 free_config_terms(&config_terms);
536 if (ret)
537 goto out_err;
538 }
539 out_err:
540 perf_cpu_map__put(cpus);
541 return found_supported ? 0 : -EINVAL;
542 }
543
544 static void tracepoint_error(struct parse_events_error *e, int err,
545 const char *sys, const char *name, int column)
546 {
547 const char *str;
548 char help[BUFSIZ];
549
550 if (!e)
551 return;
552
553 /*
554 * We get the error directly from the syscall errno (> 0),
555 * or from an encoded pointer's error (< 0).
556 */
557 err = abs(err);
558
559 switch (err) {
560 case EACCES:
561 str = "can't access trace events";
562 break;
563 case ENOENT:
564 str = "unknown tracepoint";
565 break;
566 default:
567 str = "failed to add tracepoint";
568 break;
569 }
570
571 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
572 parse_events_error__handle(e, column, strdup(str), strdup(help));
573 }
574
575 static int add_tracepoint(struct parse_events_state *parse_state,
576 struct list_head *list,
577 const char *sys_name, const char *evt_name,
578 struct parse_events_error *err,
579 struct parse_events_terms *head_config, void *loc_)
580 {
581 YYLTYPE *loc = loc_;
582 struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
583 !parse_state->fake_tp);
584
585 if (IS_ERR(evsel)) {
586 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
587 return PTR_ERR(evsel);
588 }
589
590 if (head_config) {
591 LIST_HEAD(config_terms);
592
593 if (get_config_terms(head_config, &config_terms))
594 return -ENOMEM;
595 list_splice(&config_terms, &evsel->config_terms);
596 }
597
598 list_add_tail(&evsel->core.node, list);
599 return 0;
600 }
601
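/* State shared by the tracepoint system/event glob-matching callbacks below. */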
602 struct add_tracepoint_multi_args {
603 struct parse_events_state *parse_state;
604 struct list_head *list;
605 const char *sys_glob;
606 const char *evt_glob;
607 struct parse_events_error *err;
608 struct parse_events_terms *head_config;
609 YYLTYPE *loc;
610 int found;
611 };
612
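/* Per-event callback: add the tracepoint if its name matches the event glob. */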
613 static int add_tracepoint_multi_event_cb(void *state, const char *sys_name, const char *evt_name)
614 {
615 struct add_tracepoint_multi_args *args = state;
616 int ret;
617
618 if (!strglobmatch(evt_name, args->evt_glob))
619 return 0;
620
621 args->found++;
622 ret = add_tracepoint(args->parse_state, args->list, sys_name, evt_name,
623 args->err, args->head_config, args->loc);
624
625 return ret;
626 }
627
628 static int add_tracepoint_multi_event(struct add_tracepoint_multi_args *args, const char *sys_name)
629 {
630 if (strpbrk(args->evt_glob, "*?") == NULL) {
631 /* Not a glob. */
632 args->found++;
633 return add_tracepoint(args->parse_state, args->list, sys_name, args->evt_glob,
634 args->err, args->head_config, args->loc);
635 }
636
637 return tp_pmu__for_each_tp_event(sys_name, args, add_tracepoint_multi_event_cb);
638 }
639
640 static int add_tracepoint_multi_sys_cb(void *state, const char *sys_name)
641 {
642 struct add_tracepoint_multi_args *args = state;
643
644 if (!strglobmatch(sys_name, args->sys_glob))
645 return 0;
646
647 return add_tracepoint_multi_event(args, sys_name);
648 }
649
650 static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
651 struct list_head *list,
652 const char *sys_glob, const char *evt_glob,
653 struct parse_events_error *err,
654 struct parse_events_terms *head_config, YYLTYPE *loc)
655 {
656 struct add_tracepoint_multi_args args = {
657 .parse_state = parse_state,
658 .list = list,
659 .sys_glob = sys_glob,
660 .evt_glob = evt_glob,
661 .err = err,
662 .head_config = head_config,
663 .loc = loc,
664 .found = 0,
665 };
666 int ret;
667
668 if (strpbrk(sys_glob, "*?") == NULL) {
669 /* Not a glob. */
670 ret = add_tracepoint_multi_event(&args, sys_glob);
671 } else {
672 ret = tp_pmu__for_each_tp_sys(&args, add_tracepoint_multi_sys_cb);
673 }
674 if (args.found == 0) {
675 tracepoint_error(err, ENOENT, sys_glob, evt_glob, loc->first_column);
676 return -ENOENT;
677 }
678 return ret;
679 }
680
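/*
 * Default breakpoint length: on i386 it depends on whether the running kernel
 * is 64-bit, on arm64 it is 4 bytes, otherwise sizeof(long).
 */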
681 size_t default_breakpoint_len(void)
682 {
683 #if defined(__i386__)
684 static int len;
685
686 if (len == 0) {
687 struct perf_env env = {};
688
689 perf_env__init(&env);
690 len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
691 perf_env__exit(&env);
692 }
693 return len;
694 #elif defined(__aarch64__)
695 return 4;
696 #else
697 return sizeof(long);
698 #endif
699 }
700
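/*
 * Parse a breakpoint type string made up of 'r', 'w' and 'x' characters,
 * rejecting duplicates. An empty type defaults to read|write.
 */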
701 static int
702 parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
703 {
704 int i;
705
706 for (i = 0; i < 3; i++) {
707 if (!type || !type[i])
708 break;
709
710 #define CHECK_SET_TYPE(bit) \
711 do { \
712 if (attr->bp_type & bit) \
713 return -EINVAL; \
714 else \
715 attr->bp_type |= bit; \
716 } while (0)
717
718 switch (type[i]) {
719 case 'r':
720 CHECK_SET_TYPE(HW_BREAKPOINT_R);
721 break;
722 case 'w':
723 CHECK_SET_TYPE(HW_BREAKPOINT_W);
724 break;
725 case 'x':
726 CHECK_SET_TYPE(HW_BREAKPOINT_X);
727 break;
728 default:
729 return -EINVAL;
730 }
731 }
732
733 #undef CHECK_SET_TYPE
734
735 if (!attr->bp_type) /* Default */
736 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
737
738 return 0;
739 }
740
741 int parse_events_add_breakpoint(struct parse_events_state *parse_state,
742 struct list_head *list,
743 u64 addr, char *type, u64 len,
744 struct parse_events_terms *head_config)
745 {
746 struct perf_event_attr attr;
747 LIST_HEAD(config_terms);
748 const char *name;
749
750 memset(&attr, 0, sizeof(attr));
751 attr.bp_addr = addr;
752
753 if (parse_breakpoint_type(type, &attr))
754 return -EINVAL;
755
756 /* Provide some defaults if len is not specified */
757 if (!len) {
758 if (attr.bp_type == HW_BREAKPOINT_X)
759 len = default_breakpoint_len();
760 else
761 len = HW_BREAKPOINT_LEN_4;
762 }
763
764 attr.bp_len = len;
765
766 attr.type = PERF_TYPE_BREAKPOINT;
767 attr.sample_period = 1;
768
769 if (head_config) {
770 if (config_attr(&attr, head_config, parse_state->error,
771 config_term_common))
772 return -EINVAL;
773
774 if (get_config_terms(head_config, &config_terms))
775 return -ENOMEM;
776 }
777
778 name = get_config_name(head_config);
779
780 return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
781 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
782 }
783
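/* Check that a term's value type (number or string) matches the expected one. */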
784 static int check_type_val(struct parse_events_term *term,
785 struct parse_events_error *err,
786 enum parse_events__term_val_type type)
787 {
788 if (type == term->type_val)
789 return 0;
790
791 if (err) {
792 parse_events_error__handle(err, term->err_val,
793 type == PARSE_EVENTS__TERM_TYPE_NUM
794 ? strdup("expected numeric value")
795 : strdup("expected string value"),
796 NULL);
797 }
798 return -EINVAL;
799 }
800
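/*
 * When set (see parse_events__shrink_config_terms), only terms usable by
 * 'perf stat' are accepted by config_term_avail().
 */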
801 static bool config_term_shrinked;
802
803 const char *parse_events__term_type_str(enum parse_events__term_type term_type)
804 {
805 /*
806 * Update according to parse-events.l
807 */
808 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
809 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
810 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
811 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
812 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
813 [PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
814 [PARSE_EVENTS__TERM_TYPE_NAME] = "name",
815 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
816 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
817 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
818 [PARSE_EVENTS__TERM_TYPE_TIME] = "time",
819 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
820 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
821 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
822 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
823 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
824 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
825 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
826 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
827 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
828 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
829 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
830 [PARSE_EVENTS__TERM_TYPE_AUX_ACTION] = "aux-action",
831 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
832 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
833 [PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
834 [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
835 [PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
836 [PARSE_EVENTS__TERM_TYPE_CPU] = "cpu",
837 };
838 if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
839 return "unknown term";
840
841 return config_term_names[term_type];
842 }
843
844 static bool
845 config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
846 {
847 char *err_str;
848
849 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
850 parse_events_error__handle(err, -1,
851 strdup("Invalid term_type"), NULL);
852 return false;
853 }
854 if (!config_term_shrinked)
855 return true;
856
857 switch (term_type) {
858 case PARSE_EVENTS__TERM_TYPE_CONFIG:
859 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
860 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
861 case PARSE_EVENTS__TERM_TYPE_CONFIG3:
862 case PARSE_EVENTS__TERM_TYPE_NAME:
863 case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
864 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
865 case PARSE_EVENTS__TERM_TYPE_PERCORE:
866 case PARSE_EVENTS__TERM_TYPE_CPU:
867 return true;
868 case PARSE_EVENTS__TERM_TYPE_USER:
869 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
870 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
871 case PARSE_EVENTS__TERM_TYPE_TIME:
872 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
873 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
874 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
875 case PARSE_EVENTS__TERM_TYPE_INHERIT:
876 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
877 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
878 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
879 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
880 case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
881 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
882 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
883 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
884 case PARSE_EVENTS__TERM_TYPE_RAW:
885 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
886 case PARSE_EVENTS__TERM_TYPE_HARDWARE:
887 default:
888 if (!err)
889 return false;
890
891 /* term_type is validated so indexing is safe */
892 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
893 parse_events__term_type_str(term_type)) >= 0)
894 parse_events_error__handle(err, -1, err_str, NULL);
895 return false;
896 }
897 }
898
899 void parse_events__shrink_config_terms(void)
900 {
901 config_term_shrinked = true;
902 }
903
904 static int config_term_common(struct perf_event_attr *attr,
905 struct parse_events_term *term,
906 struct parse_events_error *err)
907 {
908 #define CHECK_TYPE_VAL(type) \
909 do { \
910 if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
911 return -EINVAL; \
912 } while (0)
913
914 switch (term->type_term) {
915 case PARSE_EVENTS__TERM_TYPE_CONFIG:
916 CHECK_TYPE_VAL(NUM);
917 attr->config = term->val.num;
918 break;
919 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
920 CHECK_TYPE_VAL(NUM);
921 attr->config1 = term->val.num;
922 break;
923 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
924 CHECK_TYPE_VAL(NUM);
925 attr->config2 = term->val.num;
926 break;
927 case PARSE_EVENTS__TERM_TYPE_CONFIG3:
928 CHECK_TYPE_VAL(NUM);
929 attr->config3 = term->val.num;
930 break;
931 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
932 CHECK_TYPE_VAL(NUM);
933 break;
934 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
935 CHECK_TYPE_VAL(NUM);
936 break;
937 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
938 CHECK_TYPE_VAL(STR);
939 if (strcmp(term->val.str, "no") &&
940 parse_branch_str(term->val.str,
941 &attr->branch_sample_type)) {
942 parse_events_error__handle(err, term->err_val,
943 strdup("invalid branch sample type"),
944 NULL);
945 return -EINVAL;
946 }
947 break;
948 case PARSE_EVENTS__TERM_TYPE_TIME:
949 CHECK_TYPE_VAL(NUM);
950 if (term->val.num > 1) {
951 parse_events_error__handle(err, term->err_val,
952 strdup("expected 0 or 1"),
953 NULL);
954 return -EINVAL;
955 }
956 break;
957 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
958 CHECK_TYPE_VAL(STR);
959 break;
960 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
961 CHECK_TYPE_VAL(NUM);
962 break;
963 case PARSE_EVENTS__TERM_TYPE_INHERIT:
964 CHECK_TYPE_VAL(NUM);
965 break;
966 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
967 CHECK_TYPE_VAL(NUM);
968 break;
969 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
970 CHECK_TYPE_VAL(NUM);
971 break;
972 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
973 CHECK_TYPE_VAL(NUM);
974 break;
975 case PARSE_EVENTS__TERM_TYPE_NAME:
976 CHECK_TYPE_VAL(STR);
977 break;
978 case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
979 CHECK_TYPE_VAL(STR);
980 break;
981 case PARSE_EVENTS__TERM_TYPE_RAW:
982 CHECK_TYPE_VAL(STR);
983 break;
984 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
985 CHECK_TYPE_VAL(NUM);
986 break;
987 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
988 CHECK_TYPE_VAL(NUM);
989 break;
990 case PARSE_EVENTS__TERM_TYPE_PERCORE:
991 CHECK_TYPE_VAL(NUM);
992 if ((unsigned int)term->val.num > 1) {
993 parse_events_error__handle(err, term->err_val,
994 strdup("expected 0 or 1"),
995 NULL);
996 return -EINVAL;
997 }
998 break;
999 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1000 CHECK_TYPE_VAL(NUM);
1001 break;
1002 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
1003 CHECK_TYPE_VAL(STR);
1004 break;
1005 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1006 CHECK_TYPE_VAL(NUM);
1007 if (term->val.num > UINT_MAX) {
1008 parse_events_error__handle(err, term->err_val,
1009 strdup("too big"),
1010 NULL);
1011 return -EINVAL;
1012 }
1013 break;
1014 case PARSE_EVENTS__TERM_TYPE_CPU: {
1015 struct perf_cpu_map *map;
1016
1017 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
1018 if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
1019 parse_events_error__handle(err, term->err_val,
1020 strdup("too big"),
1021 /*help=*/NULL);
1022 return -EINVAL;
1023 }
1024 break;
1025 }
1026 assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
1027 if (perf_pmus__find(term->val.str) != NULL)
1028 break;
1029
1030 map = perf_cpu_map__new(term->val.str);
1031 if (!map) {
1032 parse_events_error__handle(err, term->err_val,
1033 strdup("not a valid PMU or CPU number"),
1034 /*help=*/NULL);
1035 return -EINVAL;
1036 }
1037 perf_cpu_map__put(map);
1038 break;
1039 }
1040 case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1041 case PARSE_EVENTS__TERM_TYPE_USER:
1042 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1043 case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1044 default:
1045 parse_events_error__handle(err, term->err_term,
1046 strdup(parse_events__term_type_str(term->type_term)),
1047 parse_events_formats_error_string(NULL));
1048 return -EINVAL;
1049 }
1050
1051 /*
1052 * Check term availability after basic checking so
1053 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
1054 *
1055 * If availability were checked at the entry of this function, the
1056 * user would see "'<sysfs term>' is not usable in 'perf stat'"
1057 * if an invalid config term is provided for legacy events
1058 * (for example, instructions/badterm/...), which is confusing.
1059 */
1060 if (!config_term_avail(term->type_term, err))
1061 return -EINVAL;
1062 return 0;
1063 #undef CHECK_TYPE_VAL
1064 }
1065
1066 static int config_term_pmu(struct perf_event_attr *attr,
1067 struct parse_events_term *term,
1068 struct parse_events_error *err)
1069 {
1070 if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
1071 struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
1072
1073 if (!pmu) {
1074 char *err_str;
1075
1076 if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
1077 parse_events_error__handle(err, term->err_term,
1078 err_str, /*help=*/NULL);
1079 return -EINVAL;
1080 }
1081 /*
1082 * Rewrite the PMU event to a legacy cache one unless the PMU
1083 * doesn't support legacy cache events or the event is present
1084 * within the PMU.
1085 */
1086 if (perf_pmu__supports_legacy_cache(pmu) &&
1087 !perf_pmu__have_event(pmu, term->config)) {
1088 attr->type = PERF_TYPE_HW_CACHE;
1089 return parse_events__decode_legacy_cache(term->config, pmu->type,
1090 &attr->config);
1091 } else {
1092 term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
1093 term->no_value = true;
1094 }
1095 }
1096 if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
1097 struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
1098
1099 if (!pmu) {
1100 char *err_str;
1101
1102 if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
1103 parse_events_error__handle(err, term->err_term,
1104 err_str, /*help=*/NULL);
1105 return -EINVAL;
1106 }
1107 /*
1108 * If the PMU has a sysfs or json event prefer it over
1109 * legacy. ARM requires this.
1110 */
1111 if (perf_pmu__have_event(pmu, term->config)) {
1112 term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
1113 term->no_value = true;
1114 term->alternate_hw_config = true;
1115 } else {
1116 attr->type = PERF_TYPE_HARDWARE;
1117 attr->config = term->val.num;
1118 if (perf_pmus__supports_extended_type())
1119 attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
1120 }
1121 return 0;
1122 }
1123 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
1124 term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
1125 /*
1126 * Always succeed for sysfs terms, as we don't know
1127 * at this point what type they need to have.
1128 */
1129 return 0;
1130 }
1131 return config_term_common(attr, term, err);
1132 }
1133
1134 static int config_term_tracepoint(struct perf_event_attr *attr,
1135 struct parse_events_term *term,
1136 struct parse_events_error *err)
1137 {
1138 switch (term->type_term) {
1139 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1140 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1141 case PARSE_EVENTS__TERM_TYPE_INHERIT:
1142 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1143 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1144 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1145 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1146 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1147 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1148 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
1149 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1150 return config_term_common(attr, term, err);
1151 case PARSE_EVENTS__TERM_TYPE_USER:
1152 case PARSE_EVENTS__TERM_TYPE_CONFIG:
1153 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1154 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1155 case PARSE_EVENTS__TERM_TYPE_CONFIG3:
1156 case PARSE_EVENTS__TERM_TYPE_NAME:
1157 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1158 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1159 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1160 case PARSE_EVENTS__TERM_TYPE_TIME:
1161 case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1162 case PARSE_EVENTS__TERM_TYPE_PERCORE:
1163 case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1164 case PARSE_EVENTS__TERM_TYPE_RAW:
1165 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1166 case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1167 case PARSE_EVENTS__TERM_TYPE_CPU:
1168 default:
1169 if (err) {
1170 parse_events_error__handle(err, term->err_term,
1171 strdup(parse_events__term_type_str(term->type_term)),
1172 strdup("valid terms: call-graph,stack-size\n")
1173 );
1174 }
1175 return -EINVAL;
1176 }
1177
1178 return 0;
1179 }
1180
1181 static int config_attr(struct perf_event_attr *attr,
1182 const struct parse_events_terms *head,
1183 struct parse_events_error *err,
1184 config_term_func_t config_term)
1185 {
1186 struct parse_events_term *term;
1187
1188 list_for_each_entry(term, &head->terms, list)
1189 if (config_term(attr, term, err))
1190 return -EINVAL;
1191
1192 return 0;
1193 }
1194
1195 static int get_config_terms(const struct parse_events_terms *head_config,
1196 struct list_head *head_terms)
1197 {
1198 #define ADD_CONFIG_TERM(__type, __weak) \
1199 struct evsel_config_term *__t; \
1200 \
1201 __t = zalloc(sizeof(*__t)); \
1202 if (!__t) \
1203 return -ENOMEM; \
1204 \
1205 INIT_LIST_HEAD(&__t->list); \
1206 __t->type = EVSEL__CONFIG_TERM_ ## __type; \
1207 __t->weak = __weak; \
1208 list_add_tail(&__t->list, head_terms)
1209
1210 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \
1211 do { \
1212 ADD_CONFIG_TERM(__type, __weak); \
1213 __t->val.__name = __val; \
1214 } while (0)
1215
1216 #define ADD_CONFIG_TERM_STR(__type, __val, __weak) \
1217 do { \
1218 ADD_CONFIG_TERM(__type, __weak); \
1219 __t->val.str = strdup(__val); \
1220 if (!__t->val.str) { \
1221 zfree(&__t); \
1222 return -ENOMEM; \
1223 } \
1224 __t->free_str = true; \
1225 } while (0)
1226
1227 struct parse_events_term *term;
1228
1229 list_for_each_entry(term, &head_config->terms, list) {
1230 switch (term->type_term) {
1231 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1232 ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
1233 break;
1234 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1235 ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
1236 break;
1237 case PARSE_EVENTS__TERM_TYPE_TIME:
1238 ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
1239 break;
1240 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1241 ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
1242 break;
1243 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1244 ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
1245 break;
1246 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1247 ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
1248 term->val.num, term->weak);
1249 break;
1250 case PARSE_EVENTS__TERM_TYPE_INHERIT:
1251 ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1252 term->val.num ? 1 : 0, term->weak);
1253 break;
1254 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1255 ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1256 term->val.num ? 0 : 1, term->weak);
1257 break;
1258 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1259 ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
1260 term->val.num, term->weak);
1261 break;
1262 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1263 ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
1264 term->val.num, term->weak);
1265 break;
1266 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1267 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1268 term->val.num ? 1 : 0, term->weak);
1269 break;
1270 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1271 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1272 term->val.num ? 0 : 1, term->weak);
1273 break;
1274 case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1275 ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
1276 break;
1277 case PARSE_EVENTS__TERM_TYPE_PERCORE:
1278 ADD_CONFIG_TERM_VAL(PERCORE, percore,
1279 term->val.num ? true : false, term->weak);
1280 break;
1281 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1282 ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
1283 term->val.num ? 1 : 0, term->weak);
1284 break;
1285 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
1286 ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
1287 break;
1288 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1289 ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
1290 term->val.num, term->weak);
1291 break;
1292 case PARSE_EVENTS__TERM_TYPE_USER:
1293 case PARSE_EVENTS__TERM_TYPE_CONFIG:
1294 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1295 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1296 case PARSE_EVENTS__TERM_TYPE_CONFIG3:
1297 case PARSE_EVENTS__TERM_TYPE_NAME:
1298 case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1299 case PARSE_EVENTS__TERM_TYPE_RAW:
1300 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1301 case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1302 case PARSE_EVENTS__TERM_TYPE_CPU:
1303 default:
1304 break;
1305 }
1306 }
1307 return 0;
1308 }
1309
1310 /*
1311 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
1312 * each bit of attr->config that the user has changed.
1313 */
1314 static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
1315 struct list_head *head_terms)
1316 {
1317 struct parse_events_term *term;
1318 u64 bits = 0;
1319 int type;
1320
1321 list_for_each_entry(term, &head_config->terms, list) {
1322 switch (term->type_term) {
1323 case PARSE_EVENTS__TERM_TYPE_USER:
1324 type = perf_pmu__format_type(pmu, term->config);
1325 if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
1326 continue;
1327 bits |= perf_pmu__format_bits(pmu, term->config);
1328 break;
1329 case PARSE_EVENTS__TERM_TYPE_CONFIG:
1330 bits = ~(u64)0;
1331 break;
1332 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1333 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1334 case PARSE_EVENTS__TERM_TYPE_CONFIG3:
1335 case PARSE_EVENTS__TERM_TYPE_NAME:
1336 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1337 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1338 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1339 case PARSE_EVENTS__TERM_TYPE_TIME:
1340 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1341 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1342 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1343 case PARSE_EVENTS__TERM_TYPE_INHERIT:
1344 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1345 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1346 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1347 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1348 case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1349 case PARSE_EVENTS__TERM_TYPE_PERCORE:
1350 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1351 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
1352 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1353 case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1354 case PARSE_EVENTS__TERM_TYPE_RAW:
1355 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
1356 case PARSE_EVENTS__TERM_TYPE_HARDWARE:
1357 case PARSE_EVENTS__TERM_TYPE_CPU:
1358 default:
1359 break;
1360 }
1361 }
1362
1363 if (bits)
1364 ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);
1365
1366 #undef ADD_CONFIG_TERM
1367 return 0;
1368 }
1369
1370 int parse_events_add_tracepoint(struct parse_events_state *parse_state,
1371 struct list_head *list,
1372 const char *sys, const char *event,
1373 struct parse_events_error *err,
1374 struct parse_events_terms *head_config, void *loc_)
1375 {
1376 YYLTYPE *loc = loc_;
1377
1378 if (head_config) {
1379 struct perf_event_attr attr;
1380
1381 if (config_attr(&attr, head_config, err,
1382 config_term_tracepoint))
1383 return -EINVAL;
1384 }
1385
1386 return add_tracepoint_multi_sys(parse_state, list, sys, event,
1387 err, head_config, loc);
1388 }
1389
1390 static int __parse_events_add_numeric(struct parse_events_state *parse_state,
1391 struct list_head *list,
1392 struct perf_pmu *pmu, u32 type, u32 extended_type,
1393 u64 config, const struct parse_events_terms *head_config,
1394 struct evsel *first_wildcard_match)
1395 {
1396 struct perf_event_attr attr;
1397 LIST_HEAD(config_terms);
1398 const char *name, *metric_id;
1399 struct perf_cpu_map *cpus;
1400 int ret;
1401
1402 memset(&attr, 0, sizeof(attr));
1403 attr.type = type;
1404 attr.config = config;
1405 if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
1406 assert(perf_pmus__supports_extended_type());
1407 attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
1408 }
1409
1410 if (head_config) {
1411 if (config_attr(&attr, head_config, parse_state->error,
1412 config_term_common))
1413 return -EINVAL;
1414
1415 if (get_config_terms(head_config, &config_terms))
1416 return -ENOMEM;
1417 }
1418
1419 name = get_config_name(head_config);
1420 metric_id = get_config_metric_id(head_config);
1421 cpus = get_config_cpu(head_config);
1422 ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
1423 metric_id, pmu, &config_terms, first_wildcard_match,
1424 cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
1425 perf_cpu_map__put(cpus);
1426 free_config_terms(&config_terms);
1427 return ret;
1428 }
1429
1430 int parse_events_add_numeric(struct parse_events_state *parse_state,
1431 struct list_head *list,
1432 u32 type, u64 config,
1433 const struct parse_events_terms *head_config,
1434 bool wildcard)
1435 {
1436 struct perf_pmu *pmu = NULL;
1437 bool found_supported = false;
1438
1439 /* Wildcards on numeric values are only supported by core PMUs. */
1440 if (wildcard && perf_pmus__supports_extended_type()) {
1441 struct evsel *first_wildcard_match = NULL;
1442 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
1443 int ret;
1444
1445 found_supported = true;
1446 if (parse_events__filter_pmu(parse_state, pmu))
1447 continue;
1448
1449 ret = __parse_events_add_numeric(parse_state, list, pmu,
1450 type, pmu->type,
1451 config, head_config,
1452 first_wildcard_match);
1453 if (ret)
1454 return ret;
1455 if (first_wildcard_match == NULL)
1456 first_wildcard_match =
1457 container_of(list->prev, struct evsel, core.node);
1458 }
1459 if (found_supported)
1460 return 0;
1461 }
1462 return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
1463 type, /*extended_type=*/0, config, head_config,
1464 /*first_wildcard_match=*/NULL);
1465 }
1466
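/* Return the percore setting from the config terms, or false if not present. */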
1467 static bool config_term_percore(struct list_head *config_terms)
1468 {
1469 struct evsel_config_term *term;
1470
1471 list_for_each_entry(term, config_terms, list) {
1472 if (term->type == EVSEL__CONFIG_TERM_PERCORE)
1473 return term->val.percore;
1474 }
1475
1476 return false;
1477 }
1478
1479 static int parse_events_add_pmu(struct parse_events_state *parse_state,
1480 struct list_head *list, struct perf_pmu *pmu,
1481 const struct parse_events_terms *const_parsed_terms,
1482 struct evsel *first_wildcard_match, u64 alternate_hw_config)
1483 {
1484 struct perf_event_attr attr;
1485 struct perf_pmu_info info;
1486 struct evsel *evsel;
1487 struct parse_events_error *err = parse_state->error;
1488 LIST_HEAD(config_terms);
1489 struct parse_events_terms parsed_terms;
1490 bool alias_rewrote_terms = false;
1491 struct perf_cpu_map *term_cpu = NULL;
1492
1493 if (verbose > 1) {
1494 struct strbuf sb;
1495
1496 strbuf_init(&sb, /*hint=*/ 0);
1497 if (pmu->selectable && const_parsed_terms &&
1498 list_empty(&const_parsed_terms->terms)) {
1499 strbuf_addf(&sb, "%s//", pmu->name);
1500 } else {
1501 strbuf_addf(&sb, "%s/", pmu->name);
1502 parse_events_terms__to_strbuf(const_parsed_terms, &sb);
1503 strbuf_addch(&sb, '/');
1504 }
1505 fprintf(stderr, "Attempt to add: %s\n", sb.buf);
1506 strbuf_release(&sb);
1507 }
1508
1509 memset(&attr, 0, sizeof(attr));
1510 if (pmu->perf_event_attr_init_default)
1511 pmu->perf_event_attr_init_default(pmu, &attr);
1512
1513 attr.type = pmu->type;
1514
1515 if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
1516 evsel = __add_event(list, &parse_state->idx, &attr,
1517 /*init_attr=*/true, /*name=*/NULL,
1518 /*metric_id=*/NULL, pmu,
1519 /*config_terms=*/NULL, first_wildcard_match,
1520 /*cpu_list=*/NULL, alternate_hw_config);
1521 return evsel ? 0 : -ENOMEM;
1522 }
1523
1524 parse_events_terms__init(&parsed_terms);
1525 if (const_parsed_terms) {
1526 int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
1527
1528 if (ret)
1529 return ret;
1530 }
1531 fix_raw(&parsed_terms, pmu);
1532
1533 /* Configure attr/terms with a known PMU, this will set hardcoded terms. */
1534 if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
1535 parse_events_terms__exit(&parsed_terms);
1536 return -EINVAL;
1537 }
1538
1539 /* Look for event names in the terms and rewrite into format based terms. */
1540 if (perf_pmu__check_alias(pmu, &parsed_terms,
1541 &info, &alias_rewrote_terms,
1542 &alternate_hw_config, err)) {
1543 parse_events_terms__exit(&parsed_terms);
1544 return -EINVAL;
1545 }
1546
1547 if (verbose > 1) {
1548 struct strbuf sb;
1549
1550 strbuf_init(&sb, /*hint=*/ 0);
1551 parse_events_terms__to_strbuf(&parsed_terms, &sb);
1552 fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
1553 strbuf_release(&sb);
1554 }
1555
1556 /* Configure attr/terms again if an alias was expanded. */
1557 if (alias_rewrote_terms &&
1558 config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
1559 parse_events_terms__exit(&parsed_terms);
1560 return -EINVAL;
1561 }
1562
1563 if (get_config_terms(&parsed_terms, &config_terms)) {
1564 parse_events_terms__exit(&parsed_terms);
1565 return -ENOMEM;
1566 }
1567
1568 /*
1569 * When using default config, record which bits of attr->config were
1570 * changed by the user.
1571 */
1572 if (pmu->perf_event_attr_init_default &&
1573 get_config_chgs(pmu, &parsed_terms, &config_terms)) {
1574 parse_events_terms__exit(&parsed_terms);
1575 return -ENOMEM;
1576 }
1577
1578 /* Skip configuring hard coded terms that were applied by config_attr. */
1579 if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
1580 parse_state->error)) {
1581 free_config_terms(&config_terms);
1582 parse_events_terms__exit(&parsed_terms);
1583 return -EINVAL;
1584 }
1585
1586 term_cpu = get_config_cpu(&parsed_terms);
1587 evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
1588 get_config_name(&parsed_terms),
1589 get_config_metric_id(&parsed_terms), pmu,
1590 &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
1591 perf_cpu_map__put(term_cpu);
1592 if (!evsel) {
1593 parse_events_terms__exit(&parsed_terms);
1594 return -ENOMEM;
1595 }
1596
1597 if (evsel->name)
1598 evsel->use_config_name = true;
1599
1600 evsel->percore = config_term_percore(&evsel->config_terms);
1601
1602 parse_events_terms__exit(&parsed_terms);
1603 free((char *)evsel->unit);
1604 evsel->unit = strdup(info.unit);
1605 evsel->scale = info.scale;
1606 evsel->per_pkg = info.per_pkg;
1607 evsel->snapshot = info.snapshot;
1608 evsel->retirement_latency.mean = info.retirement_latency_mean;
1609 evsel->retirement_latency.min = info.retirement_latency_min;
1610 evsel->retirement_latency.max = info.retirement_latency_max;
1611
1612 return 0;
1613 }
1614
1615 int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
1616 const char *event_name, u64 hw_config,
1617 const struct parse_events_terms *const_parsed_terms,
1618 struct list_head **listp, void *loc_)
1619 {
1620 struct parse_events_term *term;
1621 struct list_head *list = NULL;
1622 struct perf_pmu *pmu = NULL;
1623 YYLTYPE *loc = loc_;
1624 int ok = 0;
1625 const char *config;
1626 struct parse_events_terms parsed_terms;
1627 struct evsel *first_wildcard_match = NULL;
1628
1629 *listp = NULL;
1630
1631 parse_events_terms__init(&parsed_terms);
1632 if (const_parsed_terms) {
1633 int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
1634
1635 if (ret)
1636 return ret;
1637 }
1638
1639 config = strdup(event_name);
1640 if (!config)
1641 goto out_err;
1642
1643 if (parse_events_term__num(&term,
1644 PARSE_EVENTS__TERM_TYPE_USER,
1645 config, /*num=*/1, /*novalue=*/true,
1646 loc, /*loc_val=*/NULL) < 0) {
1647 zfree(&config);
1648 goto out_err;
1649 }
1650 list_add_tail(&term->list, &parsed_terms.terms);
1651
1652 /* Add it for all PMUs that support the alias */
1653 list = malloc(sizeof(struct list_head));
1654 if (!list)
1655 goto out_err;
1656
1657 INIT_LIST_HEAD(list);
1658
1659 while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) {
1660
1661 if (parse_events__filter_pmu(parse_state, pmu))
1662 continue;
1663
1664 if (!perf_pmu__have_event(pmu, event_name))
1665 continue;
1666
1667 if (!parse_events_add_pmu(parse_state, list, pmu,
1668 &parsed_terms, first_wildcard_match, hw_config)) {
1669 struct strbuf sb;
1670
1671 strbuf_init(&sb, /*hint=*/ 0);
1672 parse_events_terms__to_strbuf(&parsed_terms, &sb);
1673 pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
1674 strbuf_release(&sb);
1675 ok++;
1676 }
1677 if (first_wildcard_match == NULL)
1678 first_wildcard_match = container_of(list->prev, struct evsel, core.node);
1679 }
1680
1681 if (parse_state->fake_pmu) {
1682 if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
1683 first_wildcard_match, hw_config)) {
1684 struct strbuf sb;
1685
1686 strbuf_init(&sb, /*hint=*/ 0);
1687 parse_events_terms__to_strbuf(&parsed_terms, &sb);
1688 pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
1689 strbuf_release(&sb);
1690 ok++;
1691 }
1692 }
1693
1694 out_err:
1695 parse_events_terms__exit(&parsed_terms);
1696 if (ok)
1697 *listp = list;
1698 else
1699 free(list);
1700
1701 return ok ? 0 : -1;
1702 }
1703
1704 int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
1705 const char *event_or_pmu,
1706 const struct parse_events_terms *const_parsed_terms,
1707 struct list_head **listp,
1708 void *loc_)
1709 {
1710 YYLTYPE *loc = loc_;
1711 struct perf_pmu *pmu;
1712 int ok = 0;
1713 char *help;
1714 struct evsel *first_wildcard_match = NULL;
1715
1716 *listp = malloc(sizeof(**listp));
1717 if (!*listp)
1718 return -ENOMEM;
1719
1720 INIT_LIST_HEAD(*listp);
1721
1722 /* Attempt to add to list assuming event_or_pmu is a PMU name. */
1723 pmu = perf_pmus__find(event_or_pmu);
1724 if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
1725 first_wildcard_match,
1726 /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
1727 return 0;
1728
1729 if (parse_state->fake_pmu) {
1730 if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
1731 const_parsed_terms,
1732 first_wildcard_match,
1733 /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
1734 return 0;
1735 }
1736
1737 pmu = NULL;
1738 /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
1739 while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {
1740
1741 if (parse_events__filter_pmu(parse_state, pmu))
1742 continue;
1743
1744 if (!parse_events_add_pmu(parse_state, *listp, pmu,
1745 const_parsed_terms,
1746 first_wildcard_match,
1747 /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
1748 ok++;
1749 parse_state->wild_card_pmus = true;
1750 }
1751 if (first_wildcard_match == NULL) {
1752 first_wildcard_match =
1753 container_of((*listp)->prev, struct evsel, core.node);
1754 }
1755 }
1756 if (ok)
1757 return 0;
1758
1759 /* Failure to add, assume event_or_pmu is an event name. */
1760 zfree(listp);
1761 if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
1762 const_parsed_terms, listp, loc))
1763 return 0;
1764
1765 if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
1766 help = NULL;
1767 parse_events_error__handle(parse_state->error, loc->first_column,
1768 strdup("Bad event or PMU"),
1769 help);
1770 zfree(listp);
1771 return -EINVAL;
1772 }
1773
1774 void parse_events__set_leader(char *name, struct list_head *list)
1775 {
1776 struct evsel *leader;
1777
1778 if (list_empty(list)) {
1779 WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1780 return;
1781 }
1782
1783 leader = list_first_entry(list, struct evsel, core.node);
1784 __perf_evlist__set_leader(list, &leader->core);
1785 zfree(&leader->group_name);
1786 leader->group_name = name;
1787 }
1788
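/*
 * Apply the parsed modifiers (the ':u', ':k', ':p', ... suffix) to every
 * evsel on the list. The privilege modifiers are translated into the
 * attr.exclude_* bits: for example, a lone 'k' first excludes user, kernel
 * and hypervisor and then clears exclude_kernel again, so only kernel mode
 * is counted; likewise 'cycles:u' ends up with exclude_kernel == exclude_hv
 * == 1 and exclude_user == 0. The simpler modifiers map directly onto
 * evsel/attr fields below.
 */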
1789 static int parse_events__modifier_list(struct parse_events_state *parse_state,
1790 YYLTYPE *loc,
1791 struct list_head *list,
1792 struct parse_events_modifier mod,
1793 bool group)
1794 {
1795 struct evsel *evsel;
1796
1797 if (!group && mod.weak) {
1798 parse_events_error__handle(parse_state->error, loc->first_column,
1799 strdup("Weak modifier is for use with groups"), NULL);
1800 return -EINVAL;
1801 }
1802
1803 __evlist__for_each_entry(list, evsel) {
1804 /* Translate modifiers into the equivalent evsel excludes. */
1805 int eu = group ? evsel->core.attr.exclude_user : 0;
1806 int ek = group ? evsel->core.attr.exclude_kernel : 0;
1807 int eh = group ? evsel->core.attr.exclude_hv : 0;
1808 int eH = group ? evsel->core.attr.exclude_host : 0;
1809 int eG = group ? evsel->core.attr.exclude_guest : 0;
1810 int exclude = eu | ek | eh;
1811 int exclude_GH = eG | eH;
1812
1813 if (mod.user) {
1814 if (!exclude)
1815 exclude = eu = ek = eh = 1;
1816 eu = 0;
1817 }
1818 if (mod.kernel) {
1819 if (!exclude)
1820 exclude = eu = ek = eh = 1;
1821 ek = 0;
1822 }
1823 if (mod.hypervisor) {
1824 if (!exclude)
1825 exclude = eu = ek = eh = 1;
1826 eh = 0;
1827 }
1828 if (mod.guest) {
1829 if (!exclude_GH)
1830 exclude_GH = eG = eH = 1;
1831 eG = 0;
1832 }
1833 if (mod.host) {
1834 if (!exclude_GH)
1835 exclude_GH = eG = eH = 1;
1836 eH = 0;
1837 }
1838 if (!exclude_GH && exclude_GH_default) {
1839 if (perf_host)
1840 eG = 1;
1841 else if (perf_guest)
1842 eH = 1;
1843 }
1844
1845 evsel->core.attr.exclude_user = eu;
1846 evsel->core.attr.exclude_kernel = ek;
1847 evsel->core.attr.exclude_hv = eh;
1848 evsel->core.attr.exclude_host = eH;
1849 evsel->core.attr.exclude_guest = eG;
1850 evsel->exclude_GH = exclude_GH;
1851
1852 /* Simple modifiers copied to the evsel. */
1853 if (mod.precise) {
1854 u8 precise = evsel->core.attr.precise_ip + mod.precise;
1855 /*
1856 * precise ip:
1857 *
1858 * 0 - SAMPLE_IP can have arbitrary skid
1859 * 1 - SAMPLE_IP must have constant skid
1860 * 2 - SAMPLE_IP requested to have 0 skid
1861 * 3 - SAMPLE_IP must have 0 skid
1862 *
1863 * See also PERF_RECORD_MISC_EXACT_IP
1864 */
1865 if (precise > 3) {
1866 char *help;
1867
1868 if (asprintf(&help,
1869 "Maximum combined precise value is 3, adding precision to \"%s\"",
1870 evsel__name(evsel)) > 0) {
1871 parse_events_error__handle(parse_state->error,
1872 loc->first_column,
1873 help, NULL);
1874 }
1875 return -EINVAL;
1876 }
1877 evsel->core.attr.precise_ip = precise;
1878 }
1879 if (mod.precise_max)
1880 evsel->precise_max = 1;
1881 if (mod.non_idle)
1882 evsel->core.attr.exclude_idle = 1;
1883 if (mod.sample_read)
1884 evsel->sample_read = 1;
1885 if (mod.pinned && evsel__is_group_leader(evsel))
1886 evsel->core.attr.pinned = 1;
1887 if (mod.exclusive && evsel__is_group_leader(evsel))
1888 evsel->core.attr.exclusive = 1;
1889 if (mod.weak)
1890 evsel->weak_group = true;
1891 if (mod.bpf)
1892 evsel->bpf_counter = true;
1893 if (mod.retire_lat)
1894 evsel->retire_lat = true;
1895 }
1896 return 0;
1897 }
1898
1899 int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
1900 struct list_head *list,
1901 struct parse_events_modifier mod)
1902 {
1903 return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
1904 }
1905
1906 int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
1907 struct list_head *list,
1908 struct parse_events_modifier mod)
1909 {
1910 return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
1911 }
1912
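/*
 * Give any evsel on the list that has no name yet the supplied default
 * name. The first unnamed evsel takes ownership of the string, later ones
 * get a strdup()ed copy; if nothing used it, the string is freed here.
 */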
1913 int parse_events__set_default_name(struct list_head *list, char *name)
1914 {
1915 struct evsel *evsel;
1916 bool used_name = false;
1917
1918 __evlist__for_each_entry(list, evsel) {
1919 if (!evsel->name) {
1920 evsel->name = used_name ? strdup(name) : name;
1921 used_name = true;
1922 if (!evsel->name)
1923 return -ENOMEM;
1924 }
1925 }
1926 if (!used_name)
1927 free(name);
1928 return 0;
1929 }
1930
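/*
 * Drive the flex/bison event parser over either an in-memory string or a
 * FILE stream; the results are accumulated in parse_state by the grammar
 * actions.
 */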
1931 static int parse_events__scanner(const char *str,
1932 FILE *input,
1933 struct parse_events_state *parse_state)
1934 {
1935 YY_BUFFER_STATE buffer;
1936 void *scanner;
1937 int ret;
1938
1939 ret = parse_events_lex_init_extra(parse_state, &scanner);
1940 if (ret)
1941 return ret;
1942
1943 if (str)
1944 buffer = parse_events__scan_string(str, scanner);
1945 else
1946 parse_events_set_in(input, scanner);
1947
1948 #ifdef PARSER_DEBUG
1949 parse_events_debug = 1;
1950 parse_events_set_debug(1, scanner);
1951 #endif
1952 ret = parse_events_parse(parse_state, scanner);
1953
1954 if (str) {
1955 parse_events__flush_buffer(buffer, scanner);
1956 parse_events__delete_buffer(buffer, scanner);
1957 }
1958 parse_events_lex_destroy(scanner);
1959 return ret;
1960 }
1961
1962 /*
1963 * parse event config string, return a list of event terms.
1964 */
1965 int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
1966 {
1967 struct parse_events_state parse_state = {
1968 .terms = NULL,
1969 .stoken = PE_START_TERMS,
1970 };
1971 int ret;
1972
1973 ret = parse_events__scanner(str, input, &parse_state);
1974 if (!ret)
1975 list_splice(&parse_state.terms->terms, &terms->terms);
1976
1977 zfree(&parse_state.terms);
1978 return ret;
1979 }
1980
1981 static int evsel__compute_group_pmu_name(struct evsel *evsel,
1982 const struct list_head *head)
1983 {
1984 struct evsel *leader = evsel__leader(evsel);
1985 struct evsel *pos;
1986 const char *group_pmu_name;
1987 struct perf_pmu *pmu = evsel__find_pmu(evsel);
1988
1989 if (!pmu) {
1990 /*
1991 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
1992 * is a core PMU, but in heterogeneous systems this is
1993 * unknown. For now pick the first core PMU.
1994 */
1995 pmu = perf_pmus__scan_core(NULL);
1996 }
1997 if (!pmu) {
1998 pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
1999 return -EINVAL;
2000 }
2001 group_pmu_name = pmu->name;
2002 /*
2003 * Software events may be in a group with other uncore PMU events. Use
2004 * the pmu_name of the first non-software event to avoid breaking the
2005 * software event out of the group.
2006 *
2007 * Aux event leaders, like intel_pt, expect a group with events from
2008 * other PMUs, so substitute the AUX event's PMU in this case.
2009 */
2010 if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
2011 struct perf_pmu *leader_pmu = evsel__find_pmu(leader);
2012
2013 if (!leader_pmu) {
2014 /* As with determining pmu above. */
2015 leader_pmu = perf_pmus__scan_core(NULL);
2016 }
2017 /*
2018 * Starting with the leader, find the first event with a named
2019 * non-software PMU. for_each_group_(member|evsel) isn't used as
2020	 * the list isn't yet sorted to put evsels in the same group
2021 * together.
2022 */
2023 if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
2024 group_pmu_name = leader_pmu->name;
2025 } else if (leader->core.nr_members > 1) {
2026 list_for_each_entry(pos, head, core.node) {
2027 struct perf_pmu *pos_pmu;
2028
2029 if (pos == leader || evsel__leader(pos) != leader)
2030 continue;
2031 pos_pmu = evsel__find_pmu(pos);
2032 if (!pos_pmu) {
2033 /* As with determining pmu above. */
2034 pos_pmu = perf_pmus__scan_core(NULL);
2035 }
2036 if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
2037 group_pmu_name = pos_pmu->name;
2038 break;
2039 }
2040 }
2041 }
2042 }
2043 /* Record computed name. */
2044 evsel->group_pmu_name = strdup(group_pmu_name);
2045 return evsel->group_pmu_name ? 0 : -ENOMEM;
2046 }
2047
2048 __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
2049 {
2050 /* Order by insertion index. */
2051 return lhs->core.idx - rhs->core.idx;
2052 }
2053
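/*
 * Comparator passed to list_sort() by
 * parse_events__sort_events_and_fix_groups(): it keeps the user's insertion
 * order where possible, but gathers events that must share a group and
 * orders events within a group by their group PMU name so that groups can
 * later be split per PMU.
 */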
2054 static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
2055 {
2056 const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
2057 const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
2058 const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
2059 const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
2060 int *force_grouped_idx = _fg_idx;
2061 int lhs_sort_idx, rhs_sort_idx, ret;
2062 const char *lhs_pmu_name, *rhs_pmu_name;
2063
2064 /*
2065 * Get the indexes of the 2 events to sort. If the events are
2066 * in groups then the leader's index is used otherwise the
2067 * event's index is used. An index may be forced for events that
2068 * must be in the same group, namely Intel topdown events.
2069 */
2070 if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
2071 lhs_sort_idx = *force_grouped_idx;
2072 } else {
2073 bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;
2074
2075 lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
2076 }
2077 if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
2078 rhs_sort_idx = *force_grouped_idx;
2079 } else {
2080 bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;
2081
2082 rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
2083 }
2084
2085 /* If the indices differ then respect the insertion order. */
2086 if (lhs_sort_idx != rhs_sort_idx)
2087 return lhs_sort_idx - rhs_sort_idx;
2088
2089 /*
2090 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should
2091 * be in the same group. Events in the same group need to be ordered by
2092 * their grouping PMU name as the group will be broken to ensure only
2093 * events on the same PMU are programmed together.
2094 *
2095	 * With forcing, lhs_sort_idx == rhs_sort_idx shows that one or both
2096	 * events are being forced to be at force_grouped_idx. If only one event
2097 * is being forced then the other event is the group leader of the group
2098 * we're trying to force the event into. Ensure for the force grouped
2099 * case that the PMU name ordering is also respected.
2100 */
2101 lhs_pmu_name = lhs->group_pmu_name;
2102 rhs_pmu_name = rhs->group_pmu_name;
2103 ret = strcmp(lhs_pmu_name, rhs_pmu_name);
2104 if (ret)
2105 return ret;
2106
2107 /*
2108 * Architecture specific sorting, by default sort events in the same
2109	 * group with the same PMU by their insertion index. On Intel, topdown
2110 * constraints must be adhered to - slots first, etc.
2111 */
2112 return arch_evlist__cmp(lhs, rhs);
2113 }
2114
2115 int __weak arch_evlist__add_required_events(struct list_head *list __always_unused)
2116 {
2117 return 0;
2118 }
2119
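/*
 * Sort the just-parsed events and recompute their grouping: group PMU names
 * are computed, events are renumbered and sorted with list_sort(), then the
 * group leaders and nr_members are rebuilt so that no group spans more than
 * one PMU. Returns a negative errno on failure, 1 if the user's ordering or
 * grouping was changed, and 0 otherwise.
 */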
2120 static int parse_events__sort_events_and_fix_groups(struct list_head *list)
2121 {
2122 int idx = 0, force_grouped_idx = -1;
2123 struct evsel *pos, *cur_leader = NULL;
2124 struct perf_evsel *cur_leaders_grp = NULL;
2125 bool idx_changed = false;
2126 int orig_num_leaders = 0, num_leaders = 0;
2127 int ret;
2128 struct evsel *force_grouped_leader = NULL;
2129 bool last_event_was_forced_leader = false;
2130
2131 /* On x86 topdown metrics events require a slots event. */
2132 ret = arch_evlist__add_required_events(list);
2133 if (ret)
2134 return ret;
2135
2136 /*
2137 * Compute index to insert ungrouped events at. Place them where the
2138 * first ungrouped event appears.
2139 */
2140 list_for_each_entry(pos, list, core.node) {
2141 const struct evsel *pos_leader = evsel__leader(pos);
2142
2143 ret = evsel__compute_group_pmu_name(pos, list);
2144 if (ret)
2145 return ret;
2146
2147 if (pos == pos_leader)
2148 orig_num_leaders++;
2149
2150 /*
2151 * Ensure indexes are sequential, in particular for multiple
2152 * event lists being merged. The indexes are used to detect when
2153 * the user order is modified.
2154 */
2155 pos->core.idx = idx++;
2156
2157 /*
2158		 * Remember an index that all forced-grouped events will be
2159		 * sorted to. Use the group leader's index as some events
2160		 * must appear first within the group.
2161 */
2162 if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
2163 force_grouped_idx = pos_leader->core.idx;
2164 }
2165
2166 /* Sort events. */
2167 list_sort(&force_grouped_idx, list, evlist__cmp);
2168
2169 /*
2170 * Recompute groups, splitting for PMUs and adding groups for events
2171 * that require them.
2172 */
2173 idx = 0;
2174 list_for_each_entry(pos, list, core.node) {
2175 const struct evsel *pos_leader = evsel__leader(pos);
2176 const char *pos_pmu_name = pos->group_pmu_name;
2177 const char *cur_leader_pmu_name;
2178 bool pos_force_grouped = force_grouped_idx != -1 &&
2179 arch_evsel__must_be_in_group(pos);
2180
2181 /* Reset index and nr_members. */
2182 if (pos->core.idx != idx)
2183 idx_changed = true;
2184 pos->core.idx = idx++;
2185 pos->core.nr_members = 0;
2186
2187 /*
2188 * Set the group leader respecting the given groupings and that
2189 * groups can't span PMUs.
2190 */
2191 if (!cur_leader) {
2192 cur_leader = pos;
2193 cur_leaders_grp = &pos->core;
2194 if (pos_force_grouped)
2195 force_grouped_leader = pos;
2196 }
2197
2198 cur_leader_pmu_name = cur_leader->group_pmu_name;
2199 if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
2200 /* PMU changed so the group/leader must change. */
2201 cur_leader = pos;
2202 cur_leaders_grp = pos->core.leader;
2203 if (pos_force_grouped && force_grouped_leader == NULL)
2204 force_grouped_leader = pos;
2205 } else if (cur_leaders_grp != pos->core.leader) {
2206 bool split_even_if_last_leader_was_forced = true;
2207
2208 /*
2209 * Event is for a different group. If the last event was
2210 * the forced group leader then subsequent group events
2211 * and forced events should be in the same group. If
2212 * there are no other forced group events then the
2213 * forced group leader wasn't really being forced into a
2214 * group, it just set arch_evsel__must_be_in_group, and
2215 * we don't want the group to split here.
2216 */
2217 if (force_grouped_idx != -1 && last_event_was_forced_leader) {
2218 struct evsel *pos2 = pos;
2219 /*
2220 * Search the whole list as the group leaders
2221 * aren't currently valid.
2222 */
2223 list_for_each_entry_continue(pos2, list, core.node) {
2224 if (pos->core.leader == pos2->core.leader &&
2225 arch_evsel__must_be_in_group(pos2)) {
2226 split_even_if_last_leader_was_forced = false;
2227 break;
2228 }
2229 }
2230 }
2231 if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
2232 if (pos_force_grouped) {
2233 if (force_grouped_leader) {
2234 cur_leader = force_grouped_leader;
2235 cur_leaders_grp = force_grouped_leader->core.leader;
2236 } else {
2237 cur_leader = force_grouped_leader = pos;
2238 cur_leaders_grp = &pos->core;
2239 }
2240 } else {
2241 cur_leader = pos;
2242 cur_leaders_grp = pos->core.leader;
2243 }
2244 }
2245 }
2246 if (pos_leader != cur_leader) {
2247 /* The leader changed so update it. */
2248 evsel__set_leader(pos, cur_leader);
2249 }
2250 last_event_was_forced_leader = (force_grouped_leader == pos);
2251 }
2252 list_for_each_entry(pos, list, core.node) {
2253 struct evsel *pos_leader = evsel__leader(pos);
2254
2255 if (pos == pos_leader)
2256 num_leaders++;
2257 pos_leader->core.nr_members++;
2258 }
2259 return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
2260 }
2261
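/*
 * Parse the event string str and append the resulting evsels to evlist.
 * Events are regrouped to match PMUs (optionally warning when the user's
 * order had to change) and, on error, details are recorded in err for the
 * caller to print.
 */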
2262 int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
2263 struct parse_events_error *err, bool fake_pmu,
2264 bool warn_if_reordered, bool fake_tp)
2265 {
2266 struct parse_events_state parse_state = {
2267 .list = LIST_HEAD_INIT(parse_state.list),
2268 .idx = evlist->core.nr_entries,
2269 .error = err,
2270 .stoken = PE_START_EVENTS,
2271 .fake_pmu = fake_pmu,
2272 .fake_tp = fake_tp,
2273 .pmu_filter = pmu_filter,
2274 .match_legacy_cache_terms = true,
2275 };
2276 int ret, ret2;
2277
2278 ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);
2279
2280 if (!ret && list_empty(&parse_state.list)) {
2281 WARN_ONCE(true, "WARNING: event parser found nothing\n");
2282 return -1;
2283 }
2284
2285 ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
2286 if (ret2 < 0)
2287 return ret;
2288
2289 /*
2290 * Add list to the evlist even with errors to allow callers to clean up.
2291 */
2292 evlist__splice_list_tail(evlist, &parse_state.list);
2293
2294 if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
2295 pr_warning("WARNING: events were regrouped to match PMUs\n");
2296
2297 if (verbose > 0) {
2298 struct strbuf sb = STRBUF_INIT;
2299
2300 evlist__uniquify_evsel_names(evlist, &stat_config);
2301 evlist__format_evsels(evlist, &sb, 2048);
2302 pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
2303 strbuf_release(&sb);
2304 }
2305 }
2306 if (!ret) {
2307 struct evsel *last;
2308
2309 last = evlist__last(evlist);
2310 last->cmdline_group_boundary = true;
2311
2312 return 0;
2313 }
2314
2315 /*
2316 * There are 2 users - builtin-record and builtin-test objects.
2317	 * Both call evlist__delete in case of error, so we don't
2318 * need to bother.
2319 */
2320 return ret;
2321 }
2322
2323 int parse_event(struct evlist *evlist, const char *str)
2324 {
2325 struct parse_events_error err;
2326 int ret;
2327
2328 parse_events_error__init(&err);
2329 ret = parse_events(evlist, str, &err);
2330 parse_events_error__exit(&err);
2331 return ret;
2332 }
2333
2334 struct parse_events_error_entry {
2335 /** @list: The list the error is part of. */
2336 struct list_head list;
2337 /** @idx: index in the parsed string */
2338 int idx;
2339 /** @str: string to display at the index */
2340 char *str;
2341 /** @help: optional help string */
2342 char *help;
2343 };
2344
2345 void parse_events_error__init(struct parse_events_error *err)
2346 {
2347 INIT_LIST_HEAD(&err->list);
2348 }
2349
2350 void parse_events_error__exit(struct parse_events_error *err)
2351 {
2352 struct parse_events_error_entry *pos, *tmp;
2353
2354 list_for_each_entry_safe(pos, tmp, &err->list, list) {
2355 zfree(&pos->str);
2356 zfree(&pos->help);
2357 list_del_init(&pos->list);
2358 free(pos);
2359 }
2360 }
2361
2362 void parse_events_error__handle(struct parse_events_error *err, int idx,
2363 char *str, char *help)
2364 {
2365 struct parse_events_error_entry *entry;
2366
2367 if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
2368 goto out_free;
2369
2370 entry = zalloc(sizeof(*entry));
2371 if (!entry) {
2372 pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
2373 str, help ?: "<no help>");
2374 goto out_free;
2375 }
2376 entry->idx = idx;
2377 entry->str = str;
2378 entry->help = help;
2379 list_add(&entry->list, &err->list);
2380 return;
2381 out_free:
2382 free(str);
2383 free(help);
2384 }
2385
2386 #define MAX_WIDTH 1000
2387 static int get_term_width(void)
2388 {
2389 struct winsize ws;
2390
2391 get_term_dimensions(&ws);
2392 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
2393 }
2394
2395 static void __parse_events_error__print(int err_idx, const char *err_str,
2396 const char *err_help, const char *event)
2397 {
2398 const char *str = "invalid or unsupported event: ";
2399 char _buf[MAX_WIDTH];
2400 char *buf = (char *) event;
2401 int idx = 0;
2402 if (err_str) {
2403 /* -2 for extra '' in the final fprintf */
2404 int width = get_term_width() - 2;
2405 int len_event = strlen(event);
2406 int len_str, max_len, cut = 0;
2407
2408 /*
2409 * Maximum error index indent, we will cut
2410 * the event string if it's bigger.
2411 */
2412 int max_err_idx = 13;
2413
2414 /*
2415 * Let's be specific with the message when
2416 * we have the precise error.
2417 */
2418 str = "event syntax error: ";
2419 len_str = strlen(str);
2420 max_len = width - len_str;
2421
2422 buf = _buf;
2423
2424 /* We're cutting from the beginning. */
2425 if (err_idx > max_err_idx)
2426 cut = err_idx - max_err_idx;
2427
2428 strncpy(buf, event + cut, max_len);
2429
2430 /* Mark cut parts with '..' on both sides. */
2431 if (cut)
2432 buf[0] = buf[1] = '.';
2433
2434 if ((len_event - cut) > max_len) {
2435 buf[max_len - 1] = buf[max_len - 2] = '.';
2436 buf[max_len] = 0;
2437 }
2438
2439 idx = len_str + err_idx - cut;
2440 }
2441
2442 fprintf(stderr, "%s'%s'\n", str, buf);
2443 if (idx) {
2444 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
2445 if (err_help)
2446 fprintf(stderr, "\n%s\n", err_help);
2447 }
2448 }
2449
2450 void parse_events_error__print(const struct parse_events_error *err,
2451 const char *event)
2452 {
2453 struct parse_events_error_entry *pos;
2454 bool first = true;
2455
2456 list_for_each_entry(pos, &err->list, list) {
2457 if (!first)
2458 fputs("\n", stderr);
2459 __parse_events_error__print(pos->idx, pos->str, pos->help, event);
2460 first = false;
2461 }
2462 }
2463
2464 /*
2465 * In the list of errors err, do any of the error strings (str) contain the
2466 * given needle string?
2467 */
2468 bool parse_events_error__contains(const struct parse_events_error *err,
2469 const char *needle)
2470 {
2471 struct parse_events_error_entry *pos;
2472
2473 list_for_each_entry(pos, &err->list, list) {
2474 if (strstr(pos->str, needle) != NULL)
2475 return true;
2476 }
2477 return false;
2478 }
2479
2480 #undef MAX_WIDTH
2481
2482 int parse_events_option(const struct option *opt, const char *str,
2483 int unset __maybe_unused)
2484 {
2485 struct parse_events_option_args *args = opt->value;
2486 struct parse_events_error err;
2487 int ret;
2488
2489 parse_events_error__init(&err);
2490 ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
2491 /*fake_pmu=*/false, /*warn_if_reordered=*/true,
2492 /*fake_tp=*/false);
2493
2494 if (ret) {
2495 parse_events_error__print(&err, str);
2496 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
2497 }
2498 parse_events_error__exit(&err);
2499
2500 return ret;
2501 }
2502
2503 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
2504 {
2505 struct parse_events_option_args *args = opt->value;
2506 int ret;
2507
2508 if (*args->evlistp == NULL) {
2509 *args->evlistp = evlist__new();
2510
2511 if (*args->evlistp == NULL) {
2512 fprintf(stderr, "Not enough memory to create evlist\n");
2513 return -1;
2514 }
2515 }
2516 ret = parse_events_option(opt, str, unset);
2517 if (ret) {
2518 evlist__delete(*args->evlistp);
2519 *args->evlistp = NULL;
2520 }
2521
2522 return ret;
2523 }
2524
2525 static int
2526 foreach_evsel_in_last_glob(struct evlist *evlist,
2527 int (*func)(struct evsel *evsel,
2528 const void *arg),
2529 const void *arg)
2530 {
2531 struct evsel *last = NULL;
2532 int err;
2533
2534 /*
2535	 * Don't return when the list is empty; give func a chance to report
2536	 * an error when it finds last == NULL.
2537 *
2538 * So no need to WARN here, let *func do this.
2539 */
2540 if (evlist->core.nr_entries > 0)
2541 last = evlist__last(evlist);
2542
2543 do {
2544 err = (*func)(last, arg);
2545 if (err)
2546 return -1;
2547 if (!last)
2548 return 0;
2549
2550 if (last->core.node.prev == &evlist->core.entries)
2551 return 0;
2552 last = list_entry(last->core.node.prev, struct evsel, core.node);
2553 } while (!last->cmdline_group_boundary);
2554
2555 return 0;
2556 }
2557
2558 /* Will a tracepoint filter work for str or should a BPF filter be used? */
2559 static bool is_possible_tp_filter(const char *str)
2560 {
2561 return strstr(str, "uid") == NULL;
2562 }
2563
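/*
 * Attach a --filter string to an evsel: as a tracepoint filter when the
 * event is a tracepoint and the expression can be evaluated there, as an
 * address filter when the PMU advertises nr_addr_filters, otherwise as a
 * BPF filter.
 */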
2564 static int set_filter(struct evsel *evsel, const void *arg)
2565 {
2566 const char *str = arg;
2567 int nr_addr_filters = 0;
2568 struct perf_pmu *pmu;
2569
2570 if (evsel == NULL) {
2571 fprintf(stderr,
2572 "--filter option should follow a -e tracepoint or HW tracer option\n");
2573 return -1;
2574 }
2575
2576 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) {
2577 if (evsel__append_tp_filter(evsel, str) < 0) {
2578 fprintf(stderr,
2579 "not enough memory to hold filter string\n");
2580 return -1;
2581 }
2582
2583 return 0;
2584 }
2585
2586 pmu = evsel__find_pmu(evsel);
2587 if (pmu) {
2588 perf_pmu__scan_file(pmu, "nr_addr_filters",
2589 "%d", &nr_addr_filters);
2590 }
2591 if (!nr_addr_filters)
2592 return perf_bpf_filter__parse(&evsel->bpf_filters, str);
2593
2594 if (evsel__append_addr_filter(evsel, str) < 0) {
2595 fprintf(stderr,
2596 "not enough memory to hold filter string\n");
2597 return -1;
2598 }
2599
2600 return 0;
2601 }
2602
2603 int parse_filter(const struct option *opt, const char *str,
2604 int unset __maybe_unused)
2605 {
2606 struct evlist *evlist = *(struct evlist **)opt->value;
2607
2608 return foreach_evsel_in_last_glob(evlist, set_filter,
2609 (const void *)str);
2610 }
2611
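/*
 * Filter the last event glob by uid. Expressions containing "uid" are
 * rejected by is_possible_tp_filter(), so set_filter() typically applies
 * the filter via BPF, which is why the error message below mentions BPF.
 */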
2612 int parse_uid_filter(struct evlist *evlist, uid_t uid)
2613 {
2614 struct option opt = {
2615 .value = &evlist,
2616 };
2617 char buf[128];
2618 int ret;
2619
2620 snprintf(buf, sizeof(buf), "uid == %d", uid);
2621 ret = parse_filter(&opt, buf, /*unset=*/0);
2622 if (ret) {
2623 if (use_browser >= 1) {
2624 /*
2625 * Use ui__warning so a pop up appears above the
2626 * underlying BPF error message.
2627 */
2628 ui__warning("Failed to add UID filtering that uses BPF filtering.\n");
2629 } else {
2630 fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n");
2631 }
2632 }
2633 return ret;
2634 }
2635
2636 static int add_exclude_perf_filter(struct evsel *evsel,
2637 const void *arg __maybe_unused)
2638 {
2639 char new_filter[64];
2640
2641 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2642 fprintf(stderr,
2643 "--exclude-perf option should follow a -e tracepoint option\n");
2644 return -1;
2645 }
2646
2647 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
2648
2649 if (evsel__append_tp_filter(evsel, new_filter) < 0) {
2650 fprintf(stderr,
2651 "not enough memory to hold filter string\n");
2652 return -1;
2653 }
2654
2655 return 0;
2656 }
2657
2658 int exclude_perf(const struct option *opt,
2659 const char *arg __maybe_unused,
2660 int unset __maybe_unused)
2661 {
2662 struct evlist *evlist = *(struct evlist **)opt->value;
2663
2664 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
2665 NULL);
2666 }
2667
2668 int parse_events__is_hardcoded_term(struct parse_events_term *term)
2669 {
2670 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
2671 }
2672
2673 static int new_term(struct parse_events_term **_term,
2674 struct parse_events_term *temp,
2675 char *str, u64 num)
2676 {
2677 struct parse_events_term *term;
2678
2679 term = malloc(sizeof(*term));
2680 if (!term)
2681 return -ENOMEM;
2682
2683 *term = *temp;
2684 INIT_LIST_HEAD(&term->list);
2685 term->weak = false;
2686
2687 switch (term->type_val) {
2688 case PARSE_EVENTS__TERM_TYPE_NUM:
2689 term->val.num = num;
2690 break;
2691 case PARSE_EVENTS__TERM_TYPE_STR:
2692 term->val.str = str;
2693 break;
2694 default:
2695 free(term);
2696 return -EINVAL;
2697 }
2698
2699 *_term = term;
2700 return 0;
2701 }
2702
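/*
 * Allocate a term carrying a numeric value: either a "name=number" term, or
 * a bare name with no_value set and an implied value of 1, as used for
 * wildcard event names above.
 */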
2703 int parse_events_term__num(struct parse_events_term **term,
2704 enum parse_events__term_type type_term,
2705 const char *config, u64 num,
2706 bool no_value,
2707 void *loc_term_, void *loc_val_)
2708 {
2709 YYLTYPE *loc_term = loc_term_;
2710 YYLTYPE *loc_val = loc_val_;
2711
2712 struct parse_events_term temp = {
2713 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
2714 .type_term = type_term,
2715 .config = config ? : strdup(parse_events__term_type_str(type_term)),
2716 .no_value = no_value,
2717 .err_term = loc_term ? loc_term->first_column : 0,
2718 .err_val = loc_val ? loc_val->first_column : 0,
2719 };
2720
2721 return new_term(term, &temp, /*str=*/NULL, num);
2722 }
2723
2724 int parse_events_term__str(struct parse_events_term **term,
2725 enum parse_events__term_type type_term,
2726 char *config, char *str,
2727 void *loc_term_, void *loc_val_)
2728 {
2729 YYLTYPE *loc_term = loc_term_;
2730 YYLTYPE *loc_val = loc_val_;
2731
2732 struct parse_events_term temp = {
2733 .type_val = PARSE_EVENTS__TERM_TYPE_STR,
2734 .type_term = type_term,
2735 .config = config,
2736 .err_term = loc_term ? loc_term->first_column : 0,
2737 .err_val = loc_val ? loc_val->first_column : 0,
2738 };
2739
2740 return new_term(term, &temp, str, /*num=*/0);
2741 }
2742
2743 int parse_events_term__term(struct parse_events_term **term,
2744 enum parse_events__term_type term_lhs,
2745 enum parse_events__term_type term_rhs,
2746 void *loc_term, void *loc_val)
2747 {
2748 return parse_events_term__str(term, term_lhs, NULL,
2749 strdup(parse_events__term_type_str(term_rhs)),
2750 loc_term, loc_val);
2751 }
2752
2753 int parse_events_term__clone(struct parse_events_term **new,
2754 const struct parse_events_term *term)
2755 {
2756 char *str;
2757 struct parse_events_term temp = *term;
2758
2759 temp.used = false;
2760 if (term->config) {
2761 temp.config = strdup(term->config);
2762 if (!temp.config)
2763 return -ENOMEM;
2764 }
2765 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2766 return new_term(new, &temp, /*str=*/NULL, term->val.num);
2767
2768 str = strdup(term->val.str);
2769 if (!str) {
2770 zfree(&temp.config);
2771 return -ENOMEM;
2772 }
2773 return new_term(new, &temp, str, /*num=*/0);
2774 }
2775
2776 void parse_events_term__delete(struct parse_events_term *term)
2777 {
2778 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
2779 zfree(&term->val.str);
2780
2781 zfree(&term->config);
2782 free(term);
2783 }
2784
2785 static int parse_events_terms__copy(const struct parse_events_terms *src,
2786 struct parse_events_terms *dest)
2787 {
2788 struct parse_events_term *term;
2789
2790 list_for_each_entry (term, &src->terms, list) {
2791 struct parse_events_term *n;
2792 int ret;
2793
2794 ret = parse_events_term__clone(&n, term);
2795 if (ret)
2796 return ret;
2797
2798 list_add_tail(&n->list, &dest->terms);
2799 }
2800 return 0;
2801 }
2802
2803 void parse_events_terms__init(struct parse_events_terms *terms)
2804 {
2805 INIT_LIST_HEAD(&terms->terms);
2806 }
2807
2808 void parse_events_terms__exit(struct parse_events_terms *terms)
2809 {
2810 struct parse_events_term *term, *h;
2811
2812 list_for_each_entry_safe(term, h, &terms->terms, list) {
2813 list_del_init(&term->list);
2814 parse_events_term__delete(term);
2815 }
2816 }
2817
2818 void parse_events_terms__delete(struct parse_events_terms *terms)
2819 {
2820 if (!terms)
2821 return;
2822 parse_events_terms__exit(terms);
2823 free(terms);
2824 }
2825
2826 int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
2827 {
2828 struct parse_events_term *term;
2829 bool first = true;
2830
2831 if (!terms)
2832 return 0;
2833
2834 list_for_each_entry(term, &terms->terms, list) {
2835 int ret;
2836
2837 if (!first) {
2838 ret = strbuf_addch(sb, ',');
2839 if (ret < 0)
2840 return ret;
2841 }
2842 first = false;
2843
2844 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2845 if (term->no_value) {
2846 assert(term->val.num == 1);
2847 ret = strbuf_addf(sb, "%s", term->config);
2848 } else
2849 ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
2850 else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
2851 if (term->config) {
2852 ret = strbuf_addf(sb, "%s=", term->config);
2853 if (ret < 0)
2854 return ret;
2855 } else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
2856 ret = strbuf_addf(sb, "%s=",
2857 parse_events__term_type_str(term->type_term));
2858 if (ret < 0)
2859 return ret;
2860 }
2861 assert(!term->no_value);
2862 ret = strbuf_addf(sb, "%s", term->val.str);
2863 }
2864 if (ret < 0)
2865 return ret;
2866 }
2867 return 0;
2868 }
2869
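/*
 * Build a comma-separated list of the generic term names that may appear in
 * an event string, skipping internal placeholder names (those starting with
 * '<') and terms not available to users; used when constructing the "valid
 * terms" error string below.
 */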
2870 static void config_terms_list(char *buf, size_t buf_sz)
2871 {
2872 int i;
2873 bool first = true;
2874
2875 buf[0] = '\0';
2876 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
2877 const char *name = parse_events__term_type_str(i);
2878
2879 if (!config_term_avail(i, NULL))
2880 continue;
2881 if (!name)
2882 continue;
2883 if (name[0] == '<')
2884 continue;
2885
2886 if (strlen(buf) + strlen(name) + 2 >= buf_sz)
2887 return;
2888
2889 if (!first)
2890 strcat(buf, ",");
2891 else
2892 first = false;
2893 strcat(buf, name);
2894 }
2895 }
2896
2897 /*
2898  * Return a string containing the valid config terms of an event.
2899 * @additional_terms: For terms such as PMU sysfs terms.
2900 */
2901 char *parse_events_formats_error_string(char *additional_terms)
2902 {
2903 char *str;
2904 /* "no-overwrite" is the longest name */
2905 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
2906 (sizeof("no-overwrite") - 1)];
2907
2908 config_terms_list(static_terms, sizeof(static_terms));
2909 /* valid terms */
2910 if (additional_terms) {
2911 if (asprintf(&str, "valid terms: %s,%s",
2912 additional_terms, static_terms) < 0)
2913 goto fail;
2914 } else {
2915 if (asprintf(&str, "valid terms: %s", static_terms) < 0)
2916 goto fail;
2917 }
2918 return str;
2919
2920 fail:
2921 return NULL;
2922 }
2923