// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "tp_pmu.h"
#include "asm/bug.h"
#include "ui/ui.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/tool_pmu.h"
#include "util/util.h"
#include "tracepoint.h"
#include <api/fs/tracing_path.h>

#define MAX_NAME_LEN 100

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest);
static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb);

static const char *const event_types[] = {
	[PERF_TYPE_HARDWARE]	= "hardware",
	[PERF_TYPE_SOFTWARE]	= "software",
	[PERF_TYPE_TRACEPOINT]	= "tracepoint",
	[PERF_TYPE_HW_CACHE]	= "hardware-cache",
	[PERF_TYPE_RAW]		= "raw",
	[PERF_TYPE_BREAKPOINT]	= "breakpoint",
};

const char *event_type(size_t type)
{
	if (type >= PERF_TYPE_MAX)
		return "unknown";

	return event_types[type];
}

static char *get_config_str(const struct parse_events_terms *head_terms,
			    enum parse_events__term_type type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms,
					   bool fake_pmu)
{
	struct parse_events_term *term;
	struct perf_cpu_map *cpus = NULL;

	if (!head_terms)
		return NULL;

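	/*
	 * Build the union of all "cpu=" terms. Each term may give a CPU
	 * number, the name of a PMU (in which case that PMU's CPUs are
	 * used), or a CPU list string.
	 */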
	list_for_each_entry(term, &head_terms->terms, list) {
		struct perf_cpu_map *term_cpus;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_CPU)
			continue;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			term_cpus = perf_cpu_map__new_int(term->val.num);
		} else {
			struct perf_pmu *pmu = perf_pmus__find(term->val.str);

			if (pmu) {
				term_cpus = pmu->is_core && perf_cpu_map__is_empty(pmu->cpus)
					    ? cpu_map__online()
					    : perf_cpu_map__get(pmu->cpus);
			} else {
				term_cpus = perf_cpu_map__new(term->val.str);
				if (!term_cpus && fake_pmu) {
					/*
					 * Assume the PMU string makes sense on a different
					 * machine and fake a value with all online CPUs.
					 */
					term_cpus = cpu_map__online();
				}
			}
		}
		perf_cpu_map__merge(&cpus, term_cpus);
		perf_cpu_map__put(term_cpus);
	}

	return cpus;
}

/**
 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
 *           matches the raw's string value. If the string value matches an
 *           event then change the term to be an event, if not then change it
 *           to be a config term. For example, "read" may be an event of the
 *           PMU or a raw hex encoding of 0xead. The fix-up is done late so
 *           the PMU of the event can be determined and we don't need to scan
 *           all PMUs ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &config_terms->terms, list) {
		u64 num;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
			continue;

		if (perf_pmu__have_event(pmu, term->val.str)) {
			zfree(&term->config);
			term->config = term->val.str;
			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->val.num = 1;
			term->no_value = true;
			continue;
		}

		zfree(&term->config);
		term->config = strdup("config");
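		/* The raw term is "rXXXX": skip the leading 'r' and parse the hex value. */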
		errno = 0;
		num = strtoull(term->val.str + 1, NULL, 16);
		assert(errno == 0);
		free(term->val.str);
		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
		term->val.num = num;
		term->no_value = false;
	}
}

static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, struct evsel *first_wildcard_match,
	    struct perf_cpu_map *user_cpus, u64 alternate_hw_config)
{
	struct evsel *evsel;
	bool is_pmu_core;
	struct perf_cpu_map *cpus, *pmu_cpus;
	bool has_user_cpus = !perf_cpu_map__is_empty(user_cpus);

	/*
	 * Ensure the first_wildcard_match's PMU matches that of the new event
	 * being added. Otherwise try to match with another event further down
	 * the evlist.
	 */
	if (first_wildcard_match) {
		struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);

		first_wildcard_match = NULL;
		list_for_each_entry_continue(pos, list, core.node) {
			if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
				first_wildcard_match = pos;
				break;
			}
			if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
				first_wildcard_match = pos;
				break;
			}
		}
	}

	if (pmu) {
		perf_pmu__warn_invalid_formats(pmu);
		if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
			perf_pmu__warn_invalid_config(pmu, attr->config, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
			perf_pmu__warn_invalid_config(pmu, attr->config1, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
			perf_pmu__warn_invalid_config(pmu, attr->config2, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
			perf_pmu__warn_invalid_config(pmu, attr->config3, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
			perf_pmu__warn_invalid_config(pmu, attr->config4, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG4, "config4");
		}
	}
	/*
	 * If a PMU wasn't given, such as for legacy events, find it now,
	 * now that the invalid-config warnings above won't be generated
	 * for it.
	 */
	if (!pmu)
		pmu = perf_pmus__find_by_attr(attr);

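	/*
	 * Derive the PMU's own CPUs: prefer the map the PMU advertises; an
	 * empty map means all online CPUs (or the tool PMU's choice). With
	 * no PMU at all, guess core for legacy hardware/cache events.
	 */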
	if (pmu) {
		is_pmu_core = pmu->is_core;
		pmu_cpus = perf_cpu_map__get(pmu->cpus);
		if (perf_cpu_map__is_empty(pmu_cpus)) {
			if (perf_pmu__is_tool(pmu))
				pmu_cpus = tool_pmu__cpus(attr);
			else
				pmu_cpus = cpu_map__online();
		}
	} else {
		is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
			       attr->type == PERF_TYPE_HW_CACHE);
		pmu_cpus = is_pmu_core ? cpu_map__online() : NULL;
	}

	if (has_user_cpus)
		cpus = perf_cpu_map__get(user_cpus);
	else
		cpus = perf_cpu_map__get(pmu_cpus);

	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel)
		goto out_err;

	if (name) {
		evsel->name = strdup(name);
		if (!evsel->name)
			goto out_err;
	}

	if (metric_id) {
		evsel->metric_id = strdup(metric_id);
		if (!evsel->metric_id)
			goto out_err;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.pmu_cpus = pmu_cpus;
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->core.is_pmu_core = is_pmu_core;
	evsel->pmu = pmu;
	evsel->alternate_hw_config = alternate_hw_config;
	evsel->first_wildcard_match = first_wildcard_match;

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	if (has_user_cpus)
		evsel__warn_user_requested_cpus(evsel, user_cpus);

	return evsel;
out_err:
	perf_cpu_map__put(cpus);
	perf_cpu_map__put(pmu_cpus);
	/* evsel may be NULL if evsel__new_idx() failed. */
	if (evsel) {
		zfree(&evsel->name);
		zfree(&evsel->metric_id);
		free(evsel);
	}
	return NULL;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms,
		     u64 alternate_hw_config)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   alternate_hw_config) ? 0 : -ENOMEM;
}

/**
 * parse_aliases - search names for entries beginning or equalling str,
 *                 ignoring case. If multiple entries in names match str then
 *                 the longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_state *parse_state);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_state *parse_state);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_state *parse_state,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

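	/*
	 * name_end points one past the terminating NUL. Each matched section
	 * advances str by its length plus one, skipping the hyphen (or NUL)
	 * that follows it, so str < name_end means sections remain.
	 */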
	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}

/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match);

static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name, int column)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get the error directly from the syscall errno (> 0), or from an
	 * encoded pointer's error (< 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, column, strdup(str), strdup(help));
}

static int add_tracepoint(struct parse_events_state *parse_state,
			  struct list_head *list,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
					       !parse_state->fake_tp);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

struct add_tracepoint_multi_args {
	struct parse_events_state *parse_state;
	struct list_head *list;
	const char *sys_glob;
	const char *evt_glob;
	struct parse_events_error *err;
	struct parse_events_terms *head_config;
	YYLTYPE *loc;
	int found;
};

static int add_tracepoint_multi_event_cb(void *state, const char *sys_name, const char *evt_name)
{
	struct add_tracepoint_multi_args *args = state;
	int ret;

	if (!strglobmatch(evt_name, args->evt_glob))
		return 0;

	args->found++;
	ret = add_tracepoint(args->parse_state, args->list, sys_name, evt_name,
			     args->err, args->head_config, args->loc);

	return ret;
}

static int add_tracepoint_multi_event(struct add_tracepoint_multi_args *args, const char *sys_name)
{
	if (strpbrk(args->evt_glob, "*?") == NULL) {
		/* Not a glob. */
		args->found++;
		return add_tracepoint(args->parse_state, args->list, sys_name, args->evt_glob,
				      args->err, args->head_config, args->loc);
	}

	return tp_pmu__for_each_tp_event(sys_name, args, add_tracepoint_multi_event_cb);
}

static int add_tracepoint_multi_sys_cb(void *state, const char *sys_name)
{
	struct add_tracepoint_multi_args *args = state;

	if (!strglobmatch(sys_name, args->sys_glob))
		return 0;

	return add_tracepoint_multi_event(args, sys_name);
}

static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
				    struct list_head *list,
				    const char *sys_glob, const char *evt_glob,
				    struct parse_events_error *err,
				    struct parse_events_terms *head_config, YYLTYPE *loc)
{
	struct add_tracepoint_multi_args args = {
		.parse_state = parse_state,
		.list = list,
		.sys_glob = sys_glob,
		.evt_glob = evt_glob,
		.err = err,
		.head_config = head_config,
		.loc = loc,
		.found = 0,
	};
	int ret;

	if (strpbrk(sys_glob, "*?") == NULL) {
		/* Not a glob. */
		ret = add_tracepoint_multi_event(&args, sys_glob);
	} else {
		ret = tp_pmu__for_each_tp_sys(&args, add_tracepoint_multi_sys_cb);
	}
	if (args.found == 0) {
		tracepoint_error(err, ENOENT, sys_glob, evt_glob, loc->first_column);
		return -ENOENT;
	}
	return ret;
}

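/*
 * Default length for an execution breakpoint: 4 bytes on arm64 (fixed
 * instruction size), the kernel's word size on i386 (a 32-bit perf binary
 * may be running on a 64-bit kernel), and sizeof(long) elsewhere.
 */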
size_t default_breakpoint_len(void)
{
#if defined(__i386__)
	static int len;

	if (len == 0) {
		struct perf_env env = {};

		perf_env__init(&env);
		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
		perf_env__exit(&env);
	}
	return len;
#elif defined(__aarch64__)
	return 4;
#else
	return sizeof(long);
#endif
}

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

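	/*
	 * A modifier string has at most three characters, one each of 'r',
	 * 'w' and 'x'; CHECK_SET_TYPE rejects duplicates.
	 */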
	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
				struct list_head *list,
				u64 addr, char *type, u64 len,
				struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = default_breakpoint_len();
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state, config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);

	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
			 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  enum parse_events__term_val_type type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					   type == PARSE_EVENTS__TERM_TYPE_NUM
					   ? strdup("expected numeric value")
					   : strdup("expected string value"),
					   NULL);
	}
	return -EINVAL;
}

static bool config_term_shrinked;

const char *parse_events__term_type_str(enum parse_events__term_type term_type)
{
	/*
	 * Update according to parse-events.l
	 */
	static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
		[PARSE_EVENTS__TERM_TYPE_USER]			= "<sysfs term>",
		[PARSE_EVENTS__TERM_TYPE_CONFIG]		= "config",
		[PARSE_EVENTS__TERM_TYPE_CONFIG1]		= "config1",
		[PARSE_EVENTS__TERM_TYPE_CONFIG2]		= "config2",
		[PARSE_EVENTS__TERM_TYPE_CONFIG3]		= "config3",
		[PARSE_EVENTS__TERM_TYPE_CONFIG4]		= "config4",
		[PARSE_EVENTS__TERM_TYPE_NAME]			= "name",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]		= "period",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]		= "freq",
		[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]	= "branch_type",
		[PARSE_EVENTS__TERM_TYPE_TIME]			= "time",
		[PARSE_EVENTS__TERM_TYPE_CALLGRAPH]		= "call-graph",
		[PARSE_EVENTS__TERM_TYPE_STACKSIZE]		= "stack-size",
		[PARSE_EVENTS__TERM_TYPE_NOINHERIT]		= "no-inherit",
		[PARSE_EVENTS__TERM_TYPE_INHERIT]		= "inherit",
		[PARSE_EVENTS__TERM_TYPE_MAX_STACK]		= "max-stack",
		[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]		= "nr",
		[PARSE_EVENTS__TERM_TYPE_OVERWRITE]		= "overwrite",
		[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]		= "no-overwrite",
		[PARSE_EVENTS__TERM_TYPE_DRV_CFG]		= "driver-config",
		[PARSE_EVENTS__TERM_TYPE_PERCORE]		= "percore",
		[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]		= "aux-output",
		[PARSE_EVENTS__TERM_TYPE_AUX_ACTION]		= "aux-action",
		[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]	= "aux-sample-size",
		[PARSE_EVENTS__TERM_TYPE_METRIC_ID]		= "metric-id",
		[PARSE_EVENTS__TERM_TYPE_RAW]			= "raw",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG] = "legacy-hardware-config",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG]	= "legacy-cache-config",
		[PARSE_EVENTS__TERM_TYPE_CPU]			= "cpu",
		[PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV]		= "ratio-to-prev",
	};
	if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
		return "unknown term";

	return config_term_names[term_type];
}

static bool
config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					   strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_CONFIG4:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
		return true;
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     parse_events__term_type_str(term_type)) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_state *parse_state)
{
#define CHECK_TYPE_VAL(type)						\
do {									\
	if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_ ## type)) \
		return -EINVAL;						\
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG4:
		CHECK_TYPE_VAL(NUM);
		attr->config4 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("invalid branch sample type"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_RAW:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CPU: {
		struct perf_cpu_map *map;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
				parse_events_error__handle(parse_state->error, term->err_val,
							   strdup("too big"),
							   /*help=*/NULL);
				return -EINVAL;
			}
			break;
		}
		assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
		if (perf_pmus__find(term->val.str) != NULL)
			break;

		map = perf_cpu_map__new(term->val.str);
		if (!map && !parse_state->fake_pmu) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("not a valid PMU or CPU number"),
						   /*help=*/NULL);
			return -EINVAL;
		}
		perf_cpu_map__put(map);
		break;
	}
	case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
		CHECK_TYPE_VAL(STR);
		if (strtod(term->val.str, NULL) <= 0) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("zero or negative"),
						   NULL);
			return -EINVAL;
		}
		if (errno == ERANGE) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
	default:
		parse_events_error__handle(parse_state->error, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after the basic checks, so that
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If we checked availability at the entry of this function, the user
	 * would see "'<sysfs term>' is not usable in 'perf stat'" when an
	 * invalid config term is provided for a legacy event (for example,
	 * instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, parse_state->error))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

static bool check_pmu_is_core(__u32 type, const struct parse_events_term *term,
			      struct parse_events_error *err)
{
	struct perf_pmu *pmu = NULL;

	/* Avoid loading all PMUs with perf_pmus__find_by_type, just scan the core ones. */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (pmu->type == type)
			return true;
	}
	parse_events_error__handle(err, term->err_val,
				   strdup("needs a core PMU"),
				   NULL);
	return false;
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_state *parse_state)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG) {
		if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM))
			return -EINVAL;
		if (term->val.num >= PERF_COUNT_HW_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		if (!check_pmu_is_core(attr->type, term, parse_state->error))
			return -EINVAL;
		attr->config = term->val.num;
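		/*
		 * Keep the originating PMU's type in the extended type bits
		 * and present the event as a legacy hardware event.
		 */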
		if (perf_pmus__supports_extended_type())
			attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT;
		attr->type = PERF_TYPE_HARDWARE;
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG) {
		int cache_type, cache_op, cache_result;

		if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM))
			return -EINVAL;
		cache_type = term->val.num & 0xFF;
		cache_op = (term->val.num >> 8) & 0xFF;
		cache_result = (term->val.num >> 16) & 0xFF;
		if ((term->val.num & ~0xFFFFFF) ||
		    cache_type >= PERF_COUNT_HW_CACHE_MAX ||
		    cache_op >= PERF_COUNT_HW_CACHE_OP_MAX ||
		    cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		if (!check_pmu_is_core(attr->type, term, parse_state->error))
			return -EINVAL;
		attr->config = term->val.num;
		if (perf_pmus__supports_extended_type())
			attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT;
		attr->type = PERF_TYPE_HW_CACHE;
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, parse_state);
}

static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_state *parse_state)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, parse_state);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_CONFIG4:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_CPU:
	case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
	default:
		parse_events_error__handle(parse_state->error, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   strdup("valid terms: call-graph,stack-size\n"));
		return -EINVAL;
	}

	return 0;
}

static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_state *parse_state,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &head->terms, list)
		if (config_term(attr, term, parse_state))
			return -EINVAL;

	return 0;
}

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms)
{
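/*
 * Helpers to build evsel_config_terms: ADD_CONFIG_TERM allocates a term of
 * the given type, records whether it may be overridden (__weak) and appends
 * it to head_terms; the _VAL and _STR variants fill in the value.
 */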
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
			ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
			ADD_CONFIG_TERM_STR(RATIO_TO_PREV, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_USER:
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_CONFIG4:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}
	return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_CONFIG4:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;

	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, parse_state, config_term_tracepoint))
			return -EINVAL;
	}

	return add_tracepoint_multi_sys(parse_state, list, sys, event,
					err, head_config, loc);
}

static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				      struct list_head *list,
				      struct perf_pmu *pmu, u32 type, u32 extended_type,
				      u64 config, const struct parse_events_terms *head_config,
				      struct evsel *first_wildcard_match)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	struct perf_cpu_map *cpus;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state, config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	cpus = get_config_cpu(head_config, parse_state->fake_pmu);
	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
			  metric_id, pmu, &config_terms, first_wildcard_match,
			  cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
	perf_cpu_map__put(cpus);
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     const struct parse_events_terms *head_config,
			     bool wildcard)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;

	/* Wildcards on numeric values are only supported by core PMUs. */
	if (wildcard && perf_pmus__supports_extended_type()) {
		struct evsel *first_wildcard_match = NULL;
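		/*
		 * Remember the first event added so that events opened on
		 * subsequent core PMUs can link back to it as their wildcard
		 * match.
		 */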
		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			int ret;

			found_supported = true;
			if (parse_events__filter_pmu(parse_state, pmu))
				continue;

			ret = __parse_events_add_numeric(parse_state, list, pmu,
							 type, pmu->type,
							 config, head_config,
							 first_wildcard_match);
			if (ret)
				return ret;
			if (first_wildcard_match == NULL)
				first_wildcard_match =
					container_of(list->prev, struct evsel, core.node);
		}
		if (found_supported)
			return 0;
	}
	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
					  type, /*extended_type=*/0, config, head_config,
					  /*first_wildcard_match=*/NULL);
}

static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match)
{
	u64 alternate_hw_config = PERF_COUNT_HW_MAX;
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);
	struct parse_events_terms parsed_terms;
	bool alias_rewrote_terms = false;
	struct perf_cpu_map *term_cpu = NULL;

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		if (pmu->selectable && const_parsed_terms &&
		    list_empty(&const_parsed_terms->terms)) {
			strbuf_addf(&sb, "%s//", pmu->name);
		} else {
			strbuf_addf(&sb, "%s/", pmu->name);
			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
			strbuf_addch(&sb, '/');
		}
		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
		strbuf_release(&sb);
	}

	memset(&attr, 0, sizeof(attr));
	if (pmu->perf_event_attr_init_default)
		pmu->perf_event_attr_init_default(pmu, &attr);

	attr.type = pmu->type;

	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, first_wildcard_match,
				    /*cpu_list=*/NULL, alternate_hw_config);
		return evsel ? 0 : -ENOMEM;
	}

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}
	fix_raw(&parsed_terms, pmu);

	/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
	if (config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	/* Look for event names in the terms and rewrite into format based terms. */
	if (perf_pmu__check_alias(pmu, &parsed_terms,
				  &info, &alias_rewrote_terms,
				  &alternate_hw_config, err)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		parse_events_terms__to_strbuf(&parsed_terms, &sb);
		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
		strbuf_release(&sb);
	}

	/* Configure attr/terms again if an alias was expanded. */
	if (alias_rewrote_terms &&
	    config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (get_config_terms(&parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->perf_event_attr_init_default &&
	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/* Skip configuring hard coded terms that were applied by config_attr. */
	if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
			     parse_state->error)) {
		free_config_terms(&config_terms);
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	term_cpu = get_config_cpu(&parsed_terms, parse_state->fake_pmu);
	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(&parsed_terms),
			    get_config_metric_id(&parsed_terms), pmu,
			    &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
	perf_cpu_map__put(term_cpu);
	if (!evsel) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->percore = config_term_percore(&evsel->config_terms);

	parse_events_terms__exit(&parsed_terms);
	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	evsel->retirement_latency.mean = info.retirement_latency_mean;
	evsel->retirement_latency.min = info.retirement_latency_min;
	evsel->retirement_latency.max = info.retirement_latency_max;

	return 0;
}

int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       const char *event_name,
			       const struct parse_events_terms *const_parsed_terms,
			       struct list_head **listp, void *loc_)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct perf_pmu *pmu = NULL;
	YYLTYPE *loc = loc_;
	int ok = 0;
	const char *config;
	struct parse_events_terms parsed_terms;
	struct evsel *first_wildcard_match = NULL;

	*listp = NULL;

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}

	config = strdup(event_name);
	if (!config)
		goto out_err;

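	/*
	 * Turn the event name into a no-value user term so that each PMU's
	 * alias handling can resolve it as an event of that PMU.
	 */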
1572 if (parse_events_term__num(&term,
1573 PARSE_EVENTS__TERM_TYPE_USER,
1574 config, /*num=*/1, /*novalue=*/true,
1575 loc, /*loc_val=*/NULL) < 0) {
1576 zfree(&config);
1577 goto out_err;
1578 }
1579 list_add_tail(&term->list, &parsed_terms.terms);
1580
1581 /* Add it for all PMUs that support the alias */
1582 list = malloc(sizeof(struct list_head));
1583 if (!list)
1584 goto out_err;
1585
1586 INIT_LIST_HEAD(list);
1587
1588 while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) {
1589
1590 if (parse_events__filter_pmu(parse_state, pmu))
1591 continue;
1592
1593 if (!perf_pmu__have_event(pmu, event_name))
1594 continue;
1595
1596 if (!parse_events_add_pmu(parse_state, list, pmu,
1597 &parsed_terms, first_wildcard_match)) {
1598 struct strbuf sb;
1599
1600 strbuf_init(&sb, /*hint=*/ 0);
1601 parse_events_terms__to_strbuf(&parsed_terms, &sb);
1602 pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
1603 strbuf_release(&sb);
1604 ok++;
1605 }
1606 if (first_wildcard_match == NULL)
1607 first_wildcard_match = container_of(list->prev, struct evsel, core.node);
1608 }
1609
1610 if (parse_state->fake_pmu) {
1611 if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
1612 first_wildcard_match)) {
1613 struct strbuf sb;
1614
1615 strbuf_init(&sb, /*hint=*/ 0);
1616 parse_events_terms__to_strbuf(&parsed_terms, &sb);
1617 pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
1618 strbuf_release(&sb);
1619 ok++;
1620 }
1621 }
1622
1623 out_err:
1624 parse_events_terms__exit(&parsed_terms);
1625 if (ok)
1626 *listp = list;
1627 else
1628 free(list);
1629
1630 return ok ? 0 : -1;
1631 }
1632
int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
					  const char *event_or_pmu,
					  const struct parse_events_terms *const_parsed_terms,
					  struct list_head **listp,
					  void *loc_)
{
	YYLTYPE *loc = loc_;
	struct perf_pmu *pmu;
	int ok = 0;
	char *help;
	struct evsel *first_wildcard_match = NULL;

	*listp = malloc(sizeof(**listp));
	if (!*listp)
		return -ENOMEM;

	INIT_LIST_HEAD(*listp);

	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
	pmu = perf_pmus__find(event_or_pmu);
	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
					 first_wildcard_match))
		return 0;

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
					  const_parsed_terms,
					  first_wildcard_match))
			return 0;
	}

	pmu = NULL;
	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
	while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!parse_events_add_pmu(parse_state, *listp, pmu,
					  const_parsed_terms,
					  first_wildcard_match)) {
			ok++;
			parse_state->wild_card_pmus = true;
		}
		if (first_wildcard_match == NULL) {
			first_wildcard_match =
				container_of((*listp)->prev, struct evsel, core.node);
		}
	}
	if (ok)
		return 0;

	/* Failure to add, assume event_or_pmu is an event name. */
	zfree(listp);
	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu,
					const_parsed_terms, listp, loc))
		return 0;

	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
		help = NULL;
	parse_events_error__handle(parse_state->error, loc->first_column,
				   strdup("Bad event or PMU"),
				   help);
	zfree(listp);
	return -EINVAL;
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	leader = list_first_entry(list, struct evsel, core.node);
	__perf_evlist__set_leader(list, &leader->core);
	zfree(&leader->group_name);
	leader->group_name = name;
}

static int parse_events__modifier_list(struct parse_events_state *parse_state,
				       YYLTYPE *loc,
				       struct list_head *list,
				       struct parse_events_modifier mod,
				       bool group)
{
	struct evsel *evsel;

	if (!group && mod.weak) {
		parse_events_error__handle(parse_state->error, loc->first_column,
					   strdup("Weak modifier is for use with groups"), NULL);
		return -EINVAL;
	}

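	/*
	 * Worked example (illustrative): for "cycles:u" an event starts with
	 * no exclusions, so mod.user first sets eu = ek = eh = 1 and then
	 * clears eu, leaving the kernel and hypervisor excluded. For groups,
	 * each event's existing attr exclusions seed the computation instead.
	 */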
	__evlist__for_each_entry(list, evsel) {
		/* Translate modifiers into the equivalent evsel excludes. */
		int eu = group ? evsel->core.attr.exclude_user : 0;
		int ek = group ? evsel->core.attr.exclude_kernel : 0;
		int eh = group ? evsel->core.attr.exclude_hv : 0;
		int eH = group ? evsel->core.attr.exclude_host : 0;
		int eG = group ? evsel->core.attr.exclude_guest : 0;
		int exclude = eu | ek | eh;
		int exclude_GH = eG | eH;

		if (mod.user) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eu = 0;
		}
		if (mod.kernel) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		}
		if (mod.hypervisor) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		}
		if (mod.guest) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		}
		if (mod.host) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		}
		if (!exclude_GH && exclude_GH_default) {
			if (perf_host)
				eG = 1;
			else if (perf_guest)
				eH = 1;
		}

		evsel->core.attr.exclude_user = eu;
		evsel->core.attr.exclude_kernel = ek;
		evsel->core.attr.exclude_hv = eh;
		evsel->core.attr.exclude_host = eH;
		evsel->core.attr.exclude_guest = eG;
		evsel->exclude_GH = exclude_GH;

		/* Simple modifiers copied to the evsel. */
		if (mod.precise) {
			u8 precise = evsel->core.attr.precise_ip + mod.precise;
			/*
			 * precise ip:
			 *
			 *  0 - SAMPLE_IP can have arbitrary skid
			 *  1 - SAMPLE_IP must have constant skid
			 *  2 - SAMPLE_IP requested to have 0 skid
			 *  3 - SAMPLE_IP must have 0 skid
			 *
			 * See also PERF_RECORD_MISC_EXACT_IP
			 */
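			/*
			 * Each 'p' modifier adds one level, e.g. "cycles:pp"
			 * requests precise_ip == 2 (0 skid requested).
			 */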
			if (precise > 3) {
				char *help;

				if (asprintf(&help,
					     "Maximum combined precise value is 3, adding precision to \"%s\"",
					     evsel__name(evsel)) > 0) {
					parse_events_error__handle(parse_state->error,
								   loc->first_column,
								   help, NULL);
				}
				return -EINVAL;
			}
			evsel->core.attr.precise_ip = precise;
		}
		if (mod.precise_max)
			evsel->precise_max = 1;
		if (mod.non_idle)
			evsel->core.attr.exclude_idle = 1;
		if (mod.sample_read)
			evsel->sample_read = 1;
		if (mod.pinned && evsel__is_group_leader(evsel))
			evsel->core.attr.pinned = 1;
		if (mod.exclusive && evsel__is_group_leader(evsel))
			evsel->core.attr.exclusive = 1;
		if (mod.weak)
			evsel->weak_group = true;
		if (mod.bpf)
			evsel->bpf_counter = true;
		if (mod.retire_lat)
			evsel->retire_lat = true;
		if (mod.dont_regroup)
			evsel->dont_regroup = true;
	}
	return 0;
}

int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}

int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}

int parse_events__set_default_name(struct list_head *list, char *name)
{
	struct evsel *evsel;
	bool used_name = false;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name) {
			evsel->name = used_name ? strdup(name) : name;
			used_name = true;
			if (!evsel->name)
				return -ENOMEM;
		}
	}
	if (!used_name)
		free(name);
	return 0;
}

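/*
 * One flex/bison scanner serves two grammars: parse_state->stoken selects
 * the start token, PE_START_EVENTS for a full event list (__parse_events())
 * or PE_START_TERMS for a bare terms list (parse_events_terms() below).
 */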
static int parse_events__scanner(const char *str,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	buffer = parse_events__scan_string(str, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	parse_events__flush_buffer(buffer, scanner);
	parse_events__delete_buffer(buffer, scanner);
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * Parse an event config string and append the resulting terms to @terms.
 */
int parse_events_terms(struct parse_events_terms *terms, const char *str)
{
	struct parse_events_state parse_state = {
		.terms  = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, &parse_state);
	if (!ret)
		list_splice(&parse_state.terms->terms, &terms->terms);

	zfree(&parse_state.terms);
	return ret;
}

static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used
		 * because the list isn't yet sorted, so members of the same
		 * group aren't necessarily adjacent.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Record computed name. */
	evsel->group_pmu_name = strdup(group_pmu_name);
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}
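/*
 * Illustrative example: in "{uncore_imc_0/event=0x1/,dummy}" the software
 * dummy event gets group_pmu_name "uncore_imc_0", so the comparator below
 * keeps it with the uncore event instead of splitting it into its own group.
 */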

__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}

static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *force_grouped_idx = _fg_idx;
	int lhs_sort_idx, rhs_sort_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;

	/*
	 * Get the indexes of the 2 events to sort. If the events are
	 * in groups then the leader's index is used otherwise the
	 * event's index is used. An index may be forced for events that
	 * must be in the same group, namely Intel topdown events.
	 */
	if (lhs->dont_regroup) {
		lhs_sort_idx = lhs_core->idx;
	} else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
		lhs_sort_idx = *force_grouped_idx;
	} else {
		bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;

		lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
	}
	if (rhs->dont_regroup) {
		rhs_sort_idx = rhs_core->idx;
	} else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
		rhs_sort_idx = *force_grouped_idx;
	} else {
		bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;

		rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
	}

	/* If the indices differ then respect the insertion order. */
	if (lhs_sort_idx != rhs_sort_idx)
		return lhs_sort_idx - rhs_sort_idx;

	/*
	 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should
	 * be in the same group. Events in the same group need to be ordered by
	 * their grouping PMU name as the group will be broken to ensure only
	 * events on the same PMU are programmed together.
	 *
	 * With forcing, lhs_sort_idx == rhs_sort_idx means that one or both
	 * events are being forced to be at force_grouped_idx. If only one
	 * event is being forced then the other event is the leader of the
	 * group we're trying to force the event into. Ensure for the force
	 * grouped case that the PMU name ordering is also respected.
	 */
	lhs_pmu_name = lhs->group_pmu_name;
	rhs_pmu_name = rhs->group_pmu_name;
	ret = strcmp(lhs_pmu_name, rhs_pmu_name);
	if (ret)
		return ret;

	/*
	 * Architecture specific sorting, by default sort events in the same
	 * group with the same PMU by their insertion index. On Intel topdown
	 * constraints must be adhered to - slots first, etc.
	 */
	return arch_evlist__cmp(lhs, rhs);
}

int __weak arch_evlist__add_required_events(struct list_head *list __always_unused)
{
	return 0;
}

static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, force_grouped_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false;
	int orig_num_leaders = 0, num_leaders = 0;
	int ret;
	struct evsel *force_grouped_leader = NULL;
	bool last_event_was_forced_leader = false;

	/* On x86 topdown metrics events require a slots event. */
	ret = arch_evlist__add_required_events(list);
	if (ret)
		return ret;

	/*
	 * Compute index to insert ungrouped events at. Place them where the
	 * first ungrouped event appears.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		ret = evsel__compute_group_pmu_name(pos, list);
		if (ret)
			return ret;

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect when
		 * the user order is modified.
		 */
		pos->core.idx = idx++;

		/*
		 * Remember an index to sort all forced grouped events
		 * together to. Use the group leader as some events
		 * must appear first within the group.
		 */
		if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
			force_grouped_idx = pos_leader->core.idx;
	}

	/* Sort events. */
	list_sort(&force_grouped_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = pos->group_pmu_name;
		const char *cur_leader_pmu_name;
		bool pos_force_grouped = force_grouped_idx != -1 && !pos->dont_regroup &&
					 arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader respecting the given groupings and that
		 * groups can't span PMUs.
		 */
		if (!cur_leader || pos->dont_regroup) {
			cur_leader = pos->dont_regroup ? pos_leader : pos;
			cur_leaders_grp = &cur_leader->core;
			if (pos_force_grouped)
				force_grouped_leader = pos;
		}
		cur_leader_pmu_name = cur_leader->group_pmu_name;
		if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* PMU changed so the group/leader must change. */
			cur_leader = pos;
			cur_leaders_grp = pos->core.leader;
			if (pos_force_grouped && force_grouped_leader == NULL)
				force_grouped_leader = pos;
		} else if (cur_leaders_grp != pos->core.leader) {
			bool split_even_if_last_leader_was_forced = true;

			/*
			 * Event is for a different group. If the last event was
			 * the forced group leader then subsequent group events
			 * and forced events should be in the same group. If
			 * there are no other forced group events then the
			 * forced group leader wasn't really being forced into a
			 * group, it just set arch_evsel__must_be_in_group, and
			 * we don't want the group to split here.
			 */
			if (force_grouped_idx != -1 && last_event_was_forced_leader) {
				struct evsel *pos2 = pos;
				/*
				 * Search the whole list as the group leaders
				 * aren't currently valid.
				 */
				list_for_each_entry_continue(pos2, list, core.node) {
					if (pos->core.leader == pos2->core.leader &&
					    arch_evsel__must_be_in_group(pos2)) {
						split_even_if_last_leader_was_forced = false;
						break;
					}
				}
			}
			if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
				if (pos_force_grouped) {
					if (force_grouped_leader) {
						cur_leader = force_grouped_leader;
						cur_leaders_grp = force_grouped_leader->core.leader;
					} else {
						cur_leader = force_grouped_leader = pos;
						cur_leaders_grp = &pos->core;
					}
				} else {
					cur_leader = pos;
					cur_leaders_grp = pos->core.leader;
				}
			}
		}
		if (pos_leader != cur_leader) {
			/* The leader changed so update it. */
			evsel__set_leader(pos, cur_leader);
		}
		last_event_was_forced_leader = (force_grouped_leader == pos);
	}
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}
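/*
 * Illustrative effect of the pass above: a user group mixing PMUs, such as
 * "{cycles,uncore_imc_0/event=0x1/}", is split into one group per PMU. The
 * non-zero return value feeds the "events were regrouped" warning in
 * __parse_events() below.
 */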

int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
		   struct parse_events_error *err, bool fake_pmu,
		   bool warn_if_reordered, bool fake_tp)
{
	struct parse_events_state parse_state = {
		.list	  = LIST_HEAD_INIT(parse_state.list),
		.idx	  = evlist->core.nr_entries,
		.error	  = err,
		.stoken	  = PE_START_EVENTS,
		.fake_pmu = fake_pmu,
		.fake_tp  = fake_tp,
		.pmu_filter = pmu_filter,
		.match_legacy_cache_terms = true,
	};
	int ret, ret2;

	ret = parse_events__scanner(str, &parse_state);

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
	if (ret2 < 0)
		return ret;

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
		pr_warning("WARNING: events were regrouped to match PMUs\n");

		if (verbose > 0) {
			struct strbuf sb = STRBUF_INIT;

			evlist__uniquify_evsel_names(evlist, &stat_config);
			evlist__format_evsels(evlist, &sb, 2048);
			pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
			strbuf_release(&sb);
		}
	}
	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are 2 users - builtin-record and builtin-test objects.
	 * Both call evlist__delete in case of error, so we don't
	 * need to bother.
	 */
	return ret;
}

int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	if (ret && verbose > 0)
		parse_events_error__print(&err, str);
	parse_events_error__exit(&err);
	return ret;
}
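/*
 * Typical use of parse_event() (illustrative):
 *
 *	struct evlist *evlist = evlist__new();
 *
 *	if (parse_event(evlist, "cycles:u"))
 *		pr_err("failed to parse event\n");
 */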

struct parse_events_error_entry {
	/** @list: The list the error is part of. */
	struct list_head list;
	/** @idx: index in the parsed string */
	int idx;
	/** @str: string to display at the index */
	char *str;
	/** @help: optional help string */
	char *help;
};

void parse_events_error__init(struct parse_events_error *err)
{
	INIT_LIST_HEAD(&err->list);
}

void parse_events_error__exit(struct parse_events_error *err)
{
	struct parse_events_error_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &err->list, list) {
		zfree(&pos->str);
		zfree(&pos->help);
		list_del_init(&pos->list);
		free(pos);
	}
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	struct parse_events_error_entry *entry;

	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;

	entry = zalloc(sizeof(*entry));
	if (!entry) {
		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
			str, help ?: "<no help>");
		goto out_free;
	}
	entry->idx = idx;
	entry->str = str;
	entry->help = help;
	list_add(&entry->list, &err->list);
	return;
out_free:
	free(str);
	free(help);
}

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;
	if (err_str) {
		/* -2 for extra '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent, we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}
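/*
 * Roughly, for an error at index 7 of "cycles:x" the above prints:
 *
 *	event syntax error: 'cycles:x'
 *	                           \___ parser error
 */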

void parse_events_error__print(const struct parse_events_error *err,
			       const char *event)
{
	struct parse_events_error_entry *pos;
	bool first = true;

	list_for_each_entry(pos, &err->list, list) {
		if (!first)
			fputs("\n", stderr);
		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
		first = false;
	}
}

/*
 * In the list of errors err, do any of the error strings (str) contain the
 * given needle string?
 */
bool parse_events_error__contains(const struct parse_events_error *err,
				  const char *needle)
{
	struct parse_events_error_entry *pos;

	list_for_each_entry(pos, &err->list, list) {
		if (strstr(pos->str, needle) != NULL)
			return true;
	}
	return false;
}

#undef MAX_WIDTH

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct parse_events_option_args *args = opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
			     /*fake_pmu=*/false, /*warn_if_reordered=*/true,
			     /*fake_tp=*/false);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct parse_events_option_args *args = opt->value;
	int ret;

	if (*args->evlistp == NULL) {
		*args->evlistp = evlist__new();

		if (*args->evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}
	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*args->evlistp);
		*args->evlistp = NULL;
	}

	return ret;
}

static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return when list_empty, give func a chance to report
	 * error when it found last == NULL.
	 *
	 * So no need to WARN here, let *func do this.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		if (!last)
			return 0;

		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}

/* Will a tracepoint filter work for str or should a BPF filter be used? */
static bool is_possible_tp_filter(const char *str)
{
	return strstr(str, "uid") == NULL;
}
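/*
 * A kernel tracepoint filter can only test fields of the tracepoint itself,
 * so a filter mentioning "uid" (as built by parse_uid_filter() below) must
 * instead be handled by a BPF filter.
 */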

static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	pmu = evsel__find_pmu(evsel);
	if (pmu) {
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);
	}
	if (!nr_addr_filters)
		return perf_bpf_filter__parse(&evsel->bpf_filters, str);

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}

int parse_uid_filter(struct evlist *evlist, uid_t uid)
{
	struct option opt = {
		.value = &evlist,
	};
	char buf[128];
	int ret;

	snprintf(buf, sizeof(buf), "uid == %d", uid);
	ret = parse_filter(&opt, buf, /*unset=*/0);
	if (ret) {
		if (use_browser >= 1) {
			/*
			 * Use ui__warning so a pop up appears above the
			 * underlying BPF error message.
			 */
			ui__warning("Failed to add UID filtering that uses BPF filtering.\n");
		} else {
			fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n");
		}
	}
	return ret;
}

static int add_exclude_perf_filter(struct evsel *evsel,
				   const void *arg __maybe_unused)
{
	char new_filter[64];

	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"--exclude-perf option should follow a -e tracepoint option\n");
		return -1;
	}

	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());

	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}

int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

static int new_term(struct parse_events_term **_term,
		    struct parse_events_term *temp,
		    char *str, u64 num)
{
	struct parse_events_term *term;

	term = malloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	*term = *temp;
	INIT_LIST_HEAD(&term->list);
	term->weak = false;

	switch (term->type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}

int parse_events_term__num(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   const char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config    = config ? : strdup(parse_events__term_type_str(type_term)),
		.no_value  = no_value,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, /*str=*/NULL, num);
}

int parse_events_term__str(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config    = config,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, /*num=*/0);
}

int parse_events_term__term(struct parse_events_term **term,
			    enum parse_events__term_type term_lhs,
			    enum parse_events__term_type term_rhs,
			    void *loc_term, void *loc_val)
{
	return parse_events_term__str(term, term_lhs, NULL,
				      strdup(parse_events__term_type_str(term_rhs)),
				      loc_term, loc_val);
}

int parse_events_term__clone(struct parse_events_term **new,
			     const struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = *term;

	temp.used = false;
	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, /*str=*/NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str) {
		zfree(&temp.config);
		return -ENOMEM;
	}
	return new_term(new, &temp, str, /*num=*/0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest)
{
	struct parse_events_term *term;

	list_for_each_entry (term, &src->terms, list) {
		struct parse_events_term *n;
		int ret;

		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;

		list_add_tail(&n->list, &dest->terms);
	}
	return 0;
}

void parse_events_terms__init(struct parse_events_terms *terms)
{
	INIT_LIST_HEAD(&terms->terms);
}

void parse_events_terms__exit(struct parse_events_terms *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, &terms->terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct parse_events_terms *terms)
{
	if (!terms)
		return;
	parse_events_terms__exit(terms);
	free(terms);
}

static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
	struct parse_events_term *term;
	bool first = true;

	if (!terms)
		return 0;

	list_for_each_entry(term, &terms->terms, list) {
		int ret;

		if (!first) {
			ret = strbuf_addch(sb, ',');
			if (ret < 0)
				return ret;
		}
		first = false;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
			if (term->no_value) {
				assert(term->val.num == 1);
				ret = strbuf_addf(sb, "%s", term->config);
			} else
				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
		else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
			if (term->config) {
				ret = strbuf_addf(sb, "%s=", term->config);
				if (ret < 0)
					return ret;
			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
				ret = strbuf_addf(sb, "%s=",
						  parse_events__term_type_str(term->type_term));
				if (ret < 0)
					return ret;
			}
			assert(!term->no_value);
			ret = strbuf_addf(sb, "%s", term->val.str);
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}
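/*
 * Round-trip example for the above (illustrative): terms parsed from
 * "period=97,name=foo,edge" render back as "period=0x61,name=foo,edge",
 * since numeric values print in hex and no-value terms print just their
 * config name.
 */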

static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = parse_events__term_type_str(i);

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string listing the valid config terms for an event.
 * @additional_terms: extra terms to include, such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}
