1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/hw_breakpoint.h> 3 #include <linux/err.h> 4 #include <linux/list_sort.h> 5 #include <linux/zalloc.h> 6 #include <dirent.h> 7 #include <errno.h> 8 #include <sys/ioctl.h> 9 #include <sys/param.h> 10 #include "cpumap.h" 11 #include "term.h" 12 #include "env.h" 13 #include "evlist.h" 14 #include "evsel.h" 15 #include <subcmd/parse-options.h> 16 #include "parse-events.h" 17 #include "string2.h" 18 #include "strbuf.h" 19 #include "debug.h" 20 #include <perf/cpumap.h> 21 #include <util/parse-events-bison.h> 22 #include <util/parse-events-flex.h> 23 #include "pmu.h" 24 #include "pmus.h" 25 #include "tp_pmu.h" 26 #include "asm/bug.h" 27 #include "ui/ui.h" 28 #include "util/parse-branch-options.h" 29 #include "util/evsel_config.h" 30 #include "util/event.h" 31 #include "util/bpf-filter.h" 32 #include "util/stat.h" 33 #include "util/util.h" 34 #include "tracepoint.h" 35 #include <api/fs/tracing_path.h> 36 37 #define MAX_NAME_LEN 100 38 39 static int get_config_terms(const struct parse_events_terms *head_config, 40 struct list_head *head_terms); 41 static int parse_events_terms__copy(const struct parse_events_terms *src, 42 struct parse_events_terms *dest); 43 static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb); 44 45 static const char *const event_types[] = { 46 [PERF_TYPE_HARDWARE] = "hardware", 47 [PERF_TYPE_SOFTWARE] = "software", 48 [PERF_TYPE_TRACEPOINT] = "tracepoint", 49 [PERF_TYPE_HW_CACHE] = "hardware-cache", 50 [PERF_TYPE_RAW] = "raw", 51 [PERF_TYPE_BREAKPOINT] = "breakpoint", 52 }; 53 54 const char *event_type(size_t type) 55 { 56 if (type >= PERF_TYPE_MAX) 57 return "unknown"; 58 59 return event_types[type]; 60 } 61 62 static char *get_config_str(const struct parse_events_terms *head_terms, 63 enum parse_events__term_type type_term) 64 { 65 struct parse_events_term *term; 66 67 if (!head_terms) 68 return NULL; 69 70 list_for_each_entry(term, &head_terms->terms, list) 71 if (term->type_term == type_term) 72 return term->val.str; 73 74 return NULL; 75 } 76 77 static char *get_config_metric_id(const struct parse_events_terms *head_terms) 78 { 79 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID); 80 } 81 82 static char *get_config_name(const struct parse_events_terms *head_terms) 83 { 84 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME); 85 } 86 87 static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms, 88 bool fake_pmu) 89 { 90 struct parse_events_term *term; 91 struct perf_cpu_map *cpus = NULL; 92 93 if (!head_terms) 94 return NULL; 95 96 list_for_each_entry(term, &head_terms->terms, list) { 97 struct perf_cpu_map *term_cpus; 98 99 if (term->type_term != PARSE_EVENTS__TERM_TYPE_CPU) 100 continue; 101 102 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { 103 term_cpus = perf_cpu_map__new_int(term->val.num); 104 } else { 105 struct perf_pmu *pmu = perf_pmus__find(term->val.str); 106 107 if (pmu) { 108 term_cpus = pmu->is_core && perf_cpu_map__is_empty(pmu->cpus) 109 ? cpu_map__online() 110 : perf_cpu_map__get(pmu->cpus); 111 } else { 112 term_cpus = perf_cpu_map__new(term->val.str); 113 if (!term_cpus && fake_pmu) { 114 /* 115 * Assume the PMU string makes sense on a different 116 * machine and fake a value with all online CPUs. 
117 */ 118 term_cpus = cpu_map__online(); 119 } 120 } 121 } 122 perf_cpu_map__merge(&cpus, term_cpus); 123 perf_cpu_map__put(term_cpus); 124 } 125 126 return cpus; 127 } 128 129 /** 130 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that 131 * matches the raw's string value. If the string value matches an 132 * event then change the term to be an event, if not then change it to 133 * be a config term. For example, "read" may be an event of the PMU or 134 * a raw hex encoding of 0xead. The fix-up is done late so the PMU of 135 * the event can be determined and we don't need to scan all PMUs 136 * ahead-of-time. 137 * @config_terms: the list of terms that may contain a raw term. 138 * @pmu: the PMU to scan for events from. 139 */ 140 static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu) 141 { 142 struct parse_events_term *term; 143 144 list_for_each_entry(term, &config_terms->terms, list) { 145 u64 num; 146 147 if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW) 148 continue; 149 150 if (perf_pmu__have_event(pmu, term->val.str)) { 151 zfree(&term->config); 152 term->config = term->val.str; 153 term->type_val = PARSE_EVENTS__TERM_TYPE_NUM; 154 term->type_term = PARSE_EVENTS__TERM_TYPE_USER; 155 term->val.num = 1; 156 term->no_value = true; 157 continue; 158 } 159 160 zfree(&term->config); 161 term->config = strdup("config"); 162 errno = 0; 163 num = strtoull(term->val.str + 1, NULL, 16); 164 assert(errno == 0); 165 free(term->val.str); 166 term->type_val = PARSE_EVENTS__TERM_TYPE_NUM; 167 term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG; 168 term->val.num = num; 169 term->no_value = false; 170 } 171 } 172 173 static struct evsel * 174 __add_event(struct list_head *list, int *idx, 175 struct perf_event_attr *attr, 176 bool init_attr, 177 const char *name, const char *metric_id, struct perf_pmu *pmu, 178 struct list_head *config_terms, struct evsel *first_wildcard_match, 179 struct perf_cpu_map *user_cpus, u64 alternate_hw_config) 180 { 181 struct evsel *evsel; 182 bool is_pmu_core; 183 struct perf_cpu_map *cpus, *pmu_cpus; 184 bool has_user_cpus = !perf_cpu_map__is_empty(user_cpus); 185 186 /* 187 * Ensure the first_wildcard_match's PMU matches that of the new event 188 * being added. Otherwise try to match with another event further down 189 * the evlist. 190 */ 191 if (first_wildcard_match) { 192 struct evsel *pos = list_prev_entry(first_wildcard_match, core.node); 193 194 first_wildcard_match = NULL; 195 list_for_each_entry_continue(pos, list, core.node) { 196 if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) { 197 first_wildcard_match = pos; 198 break; 199 } 200 if (pos->pmu->is_core && (!pmu || pmu->is_core)) { 201 first_wildcard_match = pos; 202 break; 203 } 204 } 205 } 206 207 if (pmu) { 208 perf_pmu__warn_invalid_formats(pmu); 209 if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) { 210 perf_pmu__warn_invalid_config(pmu, attr->config, name, 211 PERF_PMU_FORMAT_VALUE_CONFIG, "config"); 212 perf_pmu__warn_invalid_config(pmu, attr->config1, name, 213 PERF_PMU_FORMAT_VALUE_CONFIG1, "config1"); 214 perf_pmu__warn_invalid_config(pmu, attr->config2, name, 215 PERF_PMU_FORMAT_VALUE_CONFIG2, "config2"); 216 perf_pmu__warn_invalid_config(pmu, attr->config3, name, 217 PERF_PMU_FORMAT_VALUE_CONFIG3, "config3"); 218 } 219 } 220 /* 221 * If a PMU wasn't given, such as for legacy events, find now that 222 * warnings won't be generated. 
223 */ 224 if (!pmu) 225 pmu = perf_pmus__find_by_attr(attr); 226 227 if (pmu) { 228 is_pmu_core = pmu->is_core; 229 pmu_cpus = perf_cpu_map__get(pmu->cpus); 230 if (perf_cpu_map__is_empty(pmu_cpus)) 231 pmu_cpus = cpu_map__online(); 232 } else { 233 is_pmu_core = (attr->type == PERF_TYPE_HARDWARE || 234 attr->type == PERF_TYPE_HW_CACHE); 235 pmu_cpus = is_pmu_core ? cpu_map__online() : NULL; 236 } 237 238 if (has_user_cpus) 239 cpus = perf_cpu_map__get(user_cpus); 240 else 241 cpus = perf_cpu_map__get(pmu_cpus); 242 243 if (init_attr) 244 event_attr_init(attr); 245 246 evsel = evsel__new_idx(attr, *idx); 247 if (!evsel) 248 goto out_err; 249 250 if (name) { 251 evsel->name = strdup(name); 252 if (!evsel->name) 253 goto out_err; 254 } 255 256 if (metric_id) { 257 evsel->metric_id = strdup(metric_id); 258 if (!evsel->metric_id) 259 goto out_err; 260 } 261 262 (*idx)++; 263 evsel->core.cpus = cpus; 264 evsel->core.pmu_cpus = pmu_cpus; 265 evsel->core.requires_cpu = pmu ? pmu->is_uncore : false; 266 evsel->core.is_pmu_core = is_pmu_core; 267 evsel->pmu = pmu; 268 evsel->alternate_hw_config = alternate_hw_config; 269 evsel->first_wildcard_match = first_wildcard_match; 270 271 if (config_terms) 272 list_splice_init(config_terms, &evsel->config_terms); 273 274 if (list) 275 list_add_tail(&evsel->core.node, list); 276 277 if (has_user_cpus) 278 evsel__warn_user_requested_cpus(evsel, user_cpus); 279 280 return evsel; 281 out_err: 282 perf_cpu_map__put(cpus); 283 perf_cpu_map__put(pmu_cpus); 284 zfree(&evsel->name); 285 zfree(&evsel->metric_id); 286 free(evsel); 287 return NULL; 288 } 289 290 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr, 291 const char *name, const char *metric_id, 292 struct perf_pmu *pmu) 293 { 294 return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name, 295 metric_id, pmu, /*config_terms=*/NULL, 296 /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL, 297 /*alternate_hw_config=*/PERF_COUNT_HW_MAX); 298 } 299 300 static int add_event(struct list_head *list, int *idx, 301 struct perf_event_attr *attr, const char *name, 302 const char *metric_id, struct list_head *config_terms, 303 u64 alternate_hw_config) 304 { 305 return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id, 306 /*pmu=*/NULL, config_terms, 307 /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL, 308 alternate_hw_config) ? 0 : -ENOMEM; 309 } 310 311 /** 312 * parse_aliases - search names for entries beginning or equalling str ignoring 313 * case. If mutliple entries in names match str then the longest 314 * is chosen. 315 * @str: The needle to look for. 316 * @names: The haystack to search. 317 * @size: The size of the haystack. 318 * @longest: Out argument giving the length of the matching entry. 
319 */ 320 static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size, 321 int *longest) 322 { 323 *longest = -1; 324 for (int i = 0; i < size; i++) { 325 for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) { 326 int n = strlen(names[i][j]); 327 328 if (n > *longest && !strncasecmp(str, names[i][j], n)) 329 *longest = n; 330 } 331 if (*longest > 0) 332 return i; 333 } 334 335 return -1; 336 } 337 338 typedef int config_term_func_t(struct perf_event_attr *attr, 339 struct parse_events_term *term, 340 struct parse_events_state *parse_state); 341 static int config_term_common(struct perf_event_attr *attr, 342 struct parse_events_term *term, 343 struct parse_events_state *parse_state); 344 static int config_attr(struct perf_event_attr *attr, 345 const struct parse_events_terms *head, 346 struct parse_events_state *parse_state, 347 config_term_func_t config_term); 348 349 /** 350 * parse_events__decode_legacy_cache - Search name for the legacy cache event 351 * name composed of 1, 2 or 3 hyphen 352 * separated sections. The first section is 353 * the cache type while the others are the 354 * optional op and optional result. To make 355 * life hard the names in the table also 356 * contain hyphens and the longest name 357 * should always be selected. 358 */ 359 int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config) 360 { 361 int len, cache_type = -1, cache_op = -1, cache_result = -1; 362 const char *name_end = &name[strlen(name) + 1]; 363 const char *str = name; 364 365 cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len); 366 if (cache_type == -1) 367 return -EINVAL; 368 str += len + 1; 369 370 if (str < name_end) { 371 cache_op = parse_aliases(str, evsel__hw_cache_op, 372 PERF_COUNT_HW_CACHE_OP_MAX, &len); 373 if (cache_op >= 0) { 374 if (!evsel__is_cache_op_valid(cache_type, cache_op)) 375 return -EINVAL; 376 str += len + 1; 377 } else { 378 cache_result = parse_aliases(str, evsel__hw_cache_result, 379 PERF_COUNT_HW_CACHE_RESULT_MAX, &len); 380 if (cache_result >= 0) 381 str += len + 1; 382 } 383 } 384 if (str < name_end) { 385 if (cache_op < 0) { 386 cache_op = parse_aliases(str, evsel__hw_cache_op, 387 PERF_COUNT_HW_CACHE_OP_MAX, &len); 388 if (cache_op >= 0) { 389 if (!evsel__is_cache_op_valid(cache_type, cache_op)) 390 return -EINVAL; 391 } 392 } else if (cache_result < 0) { 393 cache_result = parse_aliases(str, evsel__hw_cache_result, 394 PERF_COUNT_HW_CACHE_RESULT_MAX, &len); 395 } 396 } 397 398 /* 399 * Fall back to reads: 400 */ 401 if (cache_op == -1) 402 cache_op = PERF_COUNT_HW_CACHE_OP_READ; 403 404 /* 405 * Fall back to accesses: 406 */ 407 if (cache_result == -1) 408 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; 409 410 *config = cache_type | (cache_op << 8) | (cache_result << 16); 411 if (perf_pmus__supports_extended_type()) 412 *config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT; 413 return 0; 414 } 415 416 /** 417 * parse_events__filter_pmu - returns false if a wildcard PMU should be 418 * considered, true if it should be filtered. 
419 */ 420 bool parse_events__filter_pmu(const struct parse_events_state *parse_state, 421 const struct perf_pmu *pmu) 422 { 423 if (parse_state->pmu_filter == NULL) 424 return false; 425 426 return strcmp(parse_state->pmu_filter, pmu->name) != 0; 427 } 428 429 static int parse_events_add_pmu(struct parse_events_state *parse_state, 430 struct list_head *list, struct perf_pmu *pmu, 431 const struct parse_events_terms *const_parsed_terms, 432 struct evsel *first_wildcard_match); 433 434 static void tracepoint_error(struct parse_events_error *e, int err, 435 const char *sys, const char *name, int column) 436 { 437 const char *str; 438 char help[BUFSIZ]; 439 440 if (!e) 441 return; 442 443 /* 444 * We get error directly from syscall errno ( > 0), 445 * or from encoded pointer's error ( < 0). 446 */ 447 err = abs(err); 448 449 switch (err) { 450 case EACCES: 451 str = "can't access trace events"; 452 break; 453 case ENOENT: 454 str = "unknown tracepoint"; 455 break; 456 default: 457 str = "failed to add tracepoint"; 458 break; 459 } 460 461 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name); 462 parse_events_error__handle(e, column, strdup(str), strdup(help)); 463 } 464 465 static int add_tracepoint(struct parse_events_state *parse_state, 466 struct list_head *list, 467 const char *sys_name, const char *evt_name, 468 struct parse_events_error *err, 469 struct parse_events_terms *head_config, void *loc_) 470 { 471 YYLTYPE *loc = loc_; 472 struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++, 473 !parse_state->fake_tp); 474 475 if (IS_ERR(evsel)) { 476 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column); 477 return PTR_ERR(evsel); 478 } 479 480 if (head_config) { 481 LIST_HEAD(config_terms); 482 483 if (get_config_terms(head_config, &config_terms)) 484 return -ENOMEM; 485 list_splice(&config_terms, &evsel->config_terms); 486 } 487 488 list_add_tail(&evsel->core.node, list); 489 return 0; 490 } 491 492 struct add_tracepoint_multi_args { 493 struct parse_events_state *parse_state; 494 struct list_head *list; 495 const char *sys_glob; 496 const char *evt_glob; 497 struct parse_events_error *err; 498 struct parse_events_terms *head_config; 499 YYLTYPE *loc; 500 int found; 501 }; 502 503 static int add_tracepoint_multi_event_cb(void *state, const char *sys_name, const char *evt_name) 504 { 505 struct add_tracepoint_multi_args *args = state; 506 int ret; 507 508 if (!strglobmatch(evt_name, args->evt_glob)) 509 return 0; 510 511 args->found++; 512 ret = add_tracepoint(args->parse_state, args->list, sys_name, evt_name, 513 args->err, args->head_config, args->loc); 514 515 return ret; 516 } 517 518 static int add_tracepoint_multi_event(struct add_tracepoint_multi_args *args, const char *sys_name) 519 { 520 if (strpbrk(args->evt_glob, "*?") == NULL) { 521 /* Not a glob. 
*/ 522 args->found++; 523 return add_tracepoint(args->parse_state, args->list, sys_name, args->evt_glob, 524 args->err, args->head_config, args->loc); 525 } 526 527 return tp_pmu__for_each_tp_event(sys_name, args, add_tracepoint_multi_event_cb); 528 } 529 530 static int add_tracepoint_multi_sys_cb(void *state, const char *sys_name) 531 { 532 struct add_tracepoint_multi_args *args = state; 533 534 if (!strglobmatch(sys_name, args->sys_glob)) 535 return 0; 536 537 return add_tracepoint_multi_event(args, sys_name); 538 } 539 540 static int add_tracepoint_multi_sys(struct parse_events_state *parse_state, 541 struct list_head *list, 542 const char *sys_glob, const char *evt_glob, 543 struct parse_events_error *err, 544 struct parse_events_terms *head_config, YYLTYPE *loc) 545 { 546 struct add_tracepoint_multi_args args = { 547 .parse_state = parse_state, 548 .list = list, 549 .sys_glob = sys_glob, 550 .evt_glob = evt_glob, 551 .err = err, 552 .head_config = head_config, 553 .loc = loc, 554 .found = 0, 555 }; 556 int ret; 557 558 if (strpbrk(sys_glob, "*?") == NULL) { 559 /* Not a glob. */ 560 ret = add_tracepoint_multi_event(&args, sys_glob); 561 } else { 562 ret = tp_pmu__for_each_tp_sys(&args, add_tracepoint_multi_sys_cb); 563 } 564 if (args.found == 0) { 565 tracepoint_error(err, ENOENT, sys_glob, evt_glob, loc->first_column); 566 return -ENOENT; 567 } 568 return ret; 569 } 570 571 size_t default_breakpoint_len(void) 572 { 573 #if defined(__i386__) 574 static int len; 575 576 if (len == 0) { 577 struct perf_env env = {}; 578 579 perf_env__init(&env); 580 len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long); 581 perf_env__exit(&env); 582 } 583 return len; 584 #elif defined(__aarch64__) 585 return 4; 586 #else 587 return sizeof(long); 588 #endif 589 } 590 591 static int 592 parse_breakpoint_type(const char *type, struct perf_event_attr *attr) 593 { 594 int i; 595 596 for (i = 0; i < 3; i++) { 597 if (!type || !type[i]) 598 break; 599 600 #define CHECK_SET_TYPE(bit) \ 601 do { \ 602 if (attr->bp_type & bit) \ 603 return -EINVAL; \ 604 else \ 605 attr->bp_type |= bit; \ 606 } while (0) 607 608 switch (type[i]) { 609 case 'r': 610 CHECK_SET_TYPE(HW_BREAKPOINT_R); 611 break; 612 case 'w': 613 CHECK_SET_TYPE(HW_BREAKPOINT_W); 614 break; 615 case 'x': 616 CHECK_SET_TYPE(HW_BREAKPOINT_X); 617 break; 618 default: 619 return -EINVAL; 620 } 621 } 622 623 #undef CHECK_SET_TYPE 624 625 if (!attr->bp_type) /* Default */ 626 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; 627 628 return 0; 629 } 630 631 int parse_events_add_breakpoint(struct parse_events_state *parse_state, 632 struct list_head *list, 633 u64 addr, char *type, u64 len, 634 struct parse_events_terms *head_config) 635 { 636 struct perf_event_attr attr; 637 LIST_HEAD(config_terms); 638 const char *name; 639 640 memset(&attr, 0, sizeof(attr)); 641 attr.bp_addr = addr; 642 643 if (parse_breakpoint_type(type, &attr)) 644 return -EINVAL; 645 646 /* Provide some defaults if len is not specified */ 647 if (!len) { 648 if (attr.bp_type == HW_BREAKPOINT_X) 649 len = default_breakpoint_len(); 650 else 651 len = HW_BREAKPOINT_LEN_4; 652 } 653 654 attr.bp_len = len; 655 656 attr.type = PERF_TYPE_BREAKPOINT; 657 attr.sample_period = 1; 658 659 if (head_config) { 660 if (config_attr(&attr, head_config, parse_state, config_term_common)) 661 return -EINVAL; 662 663 if (get_config_terms(head_config, &config_terms)) 664 return -ENOMEM; 665 } 666 667 name = get_config_name(head_config); 668 669 return add_event(list, &parse_state->idx, &attr, 
name, /*mertic_id=*/NULL, 670 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX); 671 } 672 673 static int check_type_val(struct parse_events_term *term, 674 struct parse_events_error *err, 675 enum parse_events__term_val_type type) 676 { 677 if (type == term->type_val) 678 return 0; 679 680 if (err) { 681 parse_events_error__handle(err, term->err_val, 682 type == PARSE_EVENTS__TERM_TYPE_NUM 683 ? strdup("expected numeric value") 684 : strdup("expected string value"), 685 NULL); 686 } 687 return -EINVAL; 688 } 689 690 static bool config_term_shrinked; 691 692 const char *parse_events__term_type_str(enum parse_events__term_type term_type) 693 { 694 /* 695 * Update according to parse-events.l 696 */ 697 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = { 698 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>", 699 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config", 700 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1", 701 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2", 702 [PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3", 703 [PARSE_EVENTS__TERM_TYPE_NAME] = "name", 704 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period", 705 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq", 706 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type", 707 [PARSE_EVENTS__TERM_TYPE_TIME] = "time", 708 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph", 709 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size", 710 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit", 711 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit", 712 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack", 713 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr", 714 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite", 715 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite", 716 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config", 717 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore", 718 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output", 719 [PARSE_EVENTS__TERM_TYPE_AUX_ACTION] = "aux-action", 720 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size", 721 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id", 722 [PARSE_EVENTS__TERM_TYPE_RAW] = "raw", 723 [PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG] = "legacy-hardware-config", 724 [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG] = "legacy-cache-config", 725 [PARSE_EVENTS__TERM_TYPE_CPU] = "cpu", 726 [PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV] = "ratio-to-prev", 727 }; 728 if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR) 729 return "unknown term"; 730 731 return config_term_names[term_type]; 732 } 733 734 static bool 735 config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err) 736 { 737 char *err_str; 738 739 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) { 740 parse_events_error__handle(err, -1, 741 strdup("Invalid term_type"), NULL); 742 return false; 743 } 744 if (!config_term_shrinked) 745 return true; 746 747 switch (term_type) { 748 case PARSE_EVENTS__TERM_TYPE_CONFIG: 749 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 750 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 751 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 752 case PARSE_EVENTS__TERM_TYPE_NAME: 753 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 754 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 755 case PARSE_EVENTS__TERM_TYPE_PERCORE: 756 case PARSE_EVENTS__TERM_TYPE_CPU: 757 return true; 758 case PARSE_EVENTS__TERM_TYPE_USER: 759 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 760 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 761 case PARSE_EVENTS__TERM_TYPE_TIME: 762 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 
763 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 764 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 765 case PARSE_EVENTS__TERM_TYPE_INHERIT: 766 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 767 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 768 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 769 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 770 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 771 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 772 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 773 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 774 case PARSE_EVENTS__TERM_TYPE_RAW: 775 case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV: 776 case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG: 777 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG: 778 default: 779 if (!err) 780 return false; 781 782 /* term_type is validated so indexing is safe */ 783 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'", 784 parse_events__term_type_str(term_type)) >= 0) 785 parse_events_error__handle(err, -1, err_str, NULL); 786 return false; 787 } 788 } 789 790 void parse_events__shrink_config_terms(void) 791 { 792 config_term_shrinked = true; 793 } 794 795 static int config_term_common(struct perf_event_attr *attr, 796 struct parse_events_term *term, 797 struct parse_events_state *parse_state) 798 { 799 #define CHECK_TYPE_VAL(type) \ 800 do { \ 801 if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_ ## type)) \ 802 return -EINVAL; \ 803 } while (0) 804 805 switch (term->type_term) { 806 case PARSE_EVENTS__TERM_TYPE_CONFIG: 807 CHECK_TYPE_VAL(NUM); 808 attr->config = term->val.num; 809 break; 810 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 811 CHECK_TYPE_VAL(NUM); 812 attr->config1 = term->val.num; 813 break; 814 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 815 CHECK_TYPE_VAL(NUM); 816 attr->config2 = term->val.num; 817 break; 818 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 819 CHECK_TYPE_VAL(NUM); 820 attr->config3 = term->val.num; 821 break; 822 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 823 CHECK_TYPE_VAL(NUM); 824 break; 825 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 826 CHECK_TYPE_VAL(NUM); 827 break; 828 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 829 CHECK_TYPE_VAL(STR); 830 if (strcmp(term->val.str, "no") && 831 parse_branch_str(term->val.str, 832 &attr->branch_sample_type)) { 833 parse_events_error__handle(parse_state->error, term->err_val, 834 strdup("invalid branch sample type"), 835 NULL); 836 return -EINVAL; 837 } 838 break; 839 case PARSE_EVENTS__TERM_TYPE_TIME: 840 CHECK_TYPE_VAL(NUM); 841 if (term->val.num > 1) { 842 parse_events_error__handle(parse_state->error, term->err_val, 843 strdup("expected 0 or 1"), 844 NULL); 845 return -EINVAL; 846 } 847 break; 848 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 849 CHECK_TYPE_VAL(STR); 850 break; 851 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 852 CHECK_TYPE_VAL(NUM); 853 break; 854 case PARSE_EVENTS__TERM_TYPE_INHERIT: 855 CHECK_TYPE_VAL(NUM); 856 break; 857 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 858 CHECK_TYPE_VAL(NUM); 859 break; 860 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 861 CHECK_TYPE_VAL(NUM); 862 break; 863 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 864 CHECK_TYPE_VAL(NUM); 865 break; 866 case PARSE_EVENTS__TERM_TYPE_NAME: 867 CHECK_TYPE_VAL(STR); 868 break; 869 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 870 CHECK_TYPE_VAL(STR); 871 break; 872 case PARSE_EVENTS__TERM_TYPE_RAW: 873 CHECK_TYPE_VAL(STR); 874 break; 875 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 876 CHECK_TYPE_VAL(NUM); 877 break; 878 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 879 CHECK_TYPE_VAL(NUM); 880 break; 881 case 
PARSE_EVENTS__TERM_TYPE_PERCORE: 882 CHECK_TYPE_VAL(NUM); 883 if ((unsigned int)term->val.num > 1) { 884 parse_events_error__handle(parse_state->error, term->err_val, 885 strdup("expected 0 or 1"), 886 NULL); 887 return -EINVAL; 888 } 889 break; 890 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 891 CHECK_TYPE_VAL(NUM); 892 break; 893 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 894 CHECK_TYPE_VAL(STR); 895 break; 896 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 897 CHECK_TYPE_VAL(NUM); 898 if (term->val.num > UINT_MAX) { 899 parse_events_error__handle(parse_state->error, term->err_val, 900 strdup("too big"), 901 NULL); 902 return -EINVAL; 903 } 904 break; 905 case PARSE_EVENTS__TERM_TYPE_CPU: { 906 struct perf_cpu_map *map; 907 908 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { 909 if (term->val.num >= (u64)cpu__max_present_cpu().cpu) { 910 parse_events_error__handle(parse_state->error, term->err_val, 911 strdup("too big"), 912 /*help=*/NULL); 913 return -EINVAL; 914 } 915 break; 916 } 917 assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR); 918 if (perf_pmus__find(term->val.str) != NULL) 919 break; 920 921 map = perf_cpu_map__new(term->val.str); 922 if (!map && !parse_state->fake_pmu) { 923 parse_events_error__handle(parse_state->error, term->err_val, 924 strdup("not a valid PMU or CPU number"), 925 /*help=*/NULL); 926 return -EINVAL; 927 } 928 perf_cpu_map__put(map); 929 break; 930 } 931 case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV: 932 CHECK_TYPE_VAL(STR); 933 if (strtod(term->val.str, NULL) <= 0) { 934 parse_events_error__handle(parse_state->error, term->err_val, 935 strdup("zero or negative"), 936 NULL); 937 return -EINVAL; 938 } 939 if (errno == ERANGE) { 940 parse_events_error__handle(parse_state->error, term->err_val, 941 strdup("too big"), 942 NULL); 943 return -EINVAL; 944 } 945 break; 946 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 947 case PARSE_EVENTS__TERM_TYPE_USER: 948 case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG: 949 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG: 950 default: 951 parse_events_error__handle(parse_state->error, term->err_term, 952 strdup(parse_events__term_type_str(term->type_term)), 953 parse_events_formats_error_string(NULL)); 954 return -EINVAL; 955 } 956 957 /* 958 * Check term availability after basic checking so 959 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered. 960 * 961 * If check availability at the entry of this function, 962 * user will see "'<sysfs term>' is not usable in 'perf stat'" 963 * if an invalid config term is provided for legacy events 964 * (for example, instructions/badterm/...), which is confusing. 965 */ 966 if (!config_term_avail(term->type_term, parse_state->error)) 967 return -EINVAL; 968 return 0; 969 #undef CHECK_TYPE_VAL 970 } 971 972 static bool check_pmu_is_core(__u32 type, const struct parse_events_term *term, 973 struct parse_events_error *err) 974 { 975 struct perf_pmu *pmu = NULL; 976 977 /* Avoid loading all PMUs with perf_pmus__find_by_type, just scan the core ones. 
*/ 978 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 979 if (pmu->type == type) 980 return true; 981 } 982 parse_events_error__handle(err, term->err_val, 983 strdup("needs a core PMU"), 984 NULL); 985 return false; 986 } 987 988 static int config_term_pmu(struct perf_event_attr *attr, 989 struct parse_events_term *term, 990 struct parse_events_state *parse_state) 991 { 992 if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG) { 993 if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM)) 994 return -EINVAL; 995 if (term->val.num >= PERF_COUNT_HW_MAX) { 996 parse_events_error__handle(parse_state->error, term->err_val, 997 strdup("too big"), 998 NULL); 999 return -EINVAL; 1000 } 1001 if (!check_pmu_is_core(attr->type, term, parse_state->error)) 1002 return -EINVAL; 1003 attr->config = term->val.num; 1004 if (perf_pmus__supports_extended_type()) 1005 attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT; 1006 attr->type = PERF_TYPE_HARDWARE; 1007 return 0; 1008 } 1009 if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG) { 1010 int cache_type, cache_op, cache_result; 1011 1012 if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM)) 1013 return -EINVAL; 1014 cache_type = term->val.num & 0xFF; 1015 cache_op = (term->val.num >> 8) & 0xFF; 1016 cache_result = (term->val.num >> 16) & 0xFF; 1017 if ((term->val.num & ~0xFFFFFF) || 1018 cache_type >= PERF_COUNT_HW_CACHE_MAX || 1019 cache_op >= PERF_COUNT_HW_CACHE_OP_MAX || 1020 cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) { 1021 parse_events_error__handle(parse_state->error, term->err_val, 1022 strdup("too big"), 1023 NULL); 1024 return -EINVAL; 1025 } 1026 if (!check_pmu_is_core(attr->type, term, parse_state->error)) 1027 return -EINVAL; 1028 attr->config = term->val.num; 1029 if (perf_pmus__supports_extended_type()) 1030 attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT; 1031 attr->type = PERF_TYPE_HW_CACHE; 1032 return 0; 1033 } 1034 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER || 1035 term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) { 1036 /* 1037 * Always succeed for sysfs terms, as we dont know 1038 * at this point what type they need to have. 
1039 */ 1040 return 0; 1041 } 1042 return config_term_common(attr, term, parse_state); 1043 } 1044 1045 static int config_term_tracepoint(struct perf_event_attr *attr, 1046 struct parse_events_term *term, 1047 struct parse_events_state *parse_state) 1048 { 1049 switch (term->type_term) { 1050 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1051 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1052 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1053 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1054 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1055 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1056 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1057 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1058 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1059 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 1060 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1061 return config_term_common(attr, term, parse_state); 1062 case PARSE_EVENTS__TERM_TYPE_USER: 1063 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1064 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 1065 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 1066 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 1067 case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG: 1068 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG: 1069 case PARSE_EVENTS__TERM_TYPE_NAME: 1070 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1071 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1072 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1073 case PARSE_EVENTS__TERM_TYPE_TIME: 1074 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1075 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1076 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1077 case PARSE_EVENTS__TERM_TYPE_RAW: 1078 case PARSE_EVENTS__TERM_TYPE_CPU: 1079 case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV: 1080 default: 1081 parse_events_error__handle(parse_state->error, term->err_term, 1082 strdup(parse_events__term_type_str(term->type_term)), 1083 strdup("valid terms: call-graph,stack-size\n") 1084 ); 1085 return -EINVAL; 1086 } 1087 1088 return 0; 1089 } 1090 1091 static int config_attr(struct perf_event_attr *attr, 1092 const struct parse_events_terms *head, 1093 struct parse_events_state *parse_state, 1094 config_term_func_t config_term) 1095 { 1096 struct parse_events_term *term; 1097 1098 list_for_each_entry(term, &head->terms, list) 1099 if (config_term(attr, term, parse_state)) 1100 return -EINVAL; 1101 1102 return 0; 1103 } 1104 1105 static int get_config_terms(const struct parse_events_terms *head_config, 1106 struct list_head *head_terms) 1107 { 1108 #define ADD_CONFIG_TERM(__type, __weak) \ 1109 struct evsel_config_term *__t; \ 1110 \ 1111 __t = zalloc(sizeof(*__t)); \ 1112 if (!__t) \ 1113 return -ENOMEM; \ 1114 \ 1115 INIT_LIST_HEAD(&__t->list); \ 1116 __t->type = EVSEL__CONFIG_TERM_ ## __type; \ 1117 __t->weak = __weak; \ 1118 list_add_tail(&__t->list, head_terms) 1119 1120 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \ 1121 do { \ 1122 ADD_CONFIG_TERM(__type, __weak); \ 1123 __t->val.__name = __val; \ 1124 } while (0) 1125 1126 #define ADD_CONFIG_TERM_STR(__type, __val, __weak) \ 1127 do { \ 1128 ADD_CONFIG_TERM(__type, __weak); \ 1129 __t->val.str = strdup(__val); \ 1130 if (!__t->val.str) { \ 1131 zfree(&__t); \ 1132 return -ENOMEM; \ 1133 } \ 1134 __t->free_str = true; \ 1135 } while (0) 1136 1137 struct parse_events_term *term; 1138 1139 list_for_each_entry(term, &head_config->terms, list) { 1140 switch (term->type_term) { 1141 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1142 ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak); 1143 break; 1144 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1145 ADD_CONFIG_TERM_VAL(FREQ, 
freq, term->val.num, term->weak); 1146 break; 1147 case PARSE_EVENTS__TERM_TYPE_TIME: 1148 ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak); 1149 break; 1150 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1151 ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak); 1152 break; 1153 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1154 ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak); 1155 break; 1156 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1157 ADD_CONFIG_TERM_VAL(STACK_USER, stack_user, 1158 term->val.num, term->weak); 1159 break; 1160 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1161 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1162 term->val.num ? 1 : 0, term->weak); 1163 break; 1164 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1165 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1166 term->val.num ? 0 : 1, term->weak); 1167 break; 1168 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1169 ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack, 1170 term->val.num, term->weak); 1171 break; 1172 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1173 ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events, 1174 term->val.num, term->weak); 1175 break; 1176 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1177 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1178 term->val.num ? 1 : 0, term->weak); 1179 break; 1180 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1181 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1182 term->val.num ? 0 : 1, term->weak); 1183 break; 1184 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1185 ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak); 1186 break; 1187 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1188 ADD_CONFIG_TERM_VAL(PERCORE, percore, 1189 term->val.num ? true : false, term->weak); 1190 break; 1191 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1192 ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output, 1193 term->val.num ? 1 : 0, term->weak); 1194 break; 1195 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 1196 ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak); 1197 break; 1198 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1199 ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size, 1200 term->val.num, term->weak); 1201 break; 1202 case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV: 1203 ADD_CONFIG_TERM_STR(RATIO_TO_PREV, term->val.str, term->weak); 1204 break; 1205 case PARSE_EVENTS__TERM_TYPE_USER: 1206 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1207 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 1208 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 1209 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 1210 case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG: 1211 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG: 1212 case PARSE_EVENTS__TERM_TYPE_NAME: 1213 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1214 case PARSE_EVENTS__TERM_TYPE_RAW: 1215 case PARSE_EVENTS__TERM_TYPE_CPU: 1216 default: 1217 break; 1218 } 1219 } 1220 return 0; 1221 } 1222 1223 /* 1224 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for 1225 * each bit of attr->config that the user has changed. 
1226 */ 1227 static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config, 1228 struct list_head *head_terms) 1229 { 1230 struct parse_events_term *term; 1231 u64 bits = 0; 1232 int type; 1233 1234 list_for_each_entry(term, &head_config->terms, list) { 1235 switch (term->type_term) { 1236 case PARSE_EVENTS__TERM_TYPE_USER: 1237 type = perf_pmu__format_type(pmu, term->config); 1238 if (type != PERF_PMU_FORMAT_VALUE_CONFIG) 1239 continue; 1240 bits |= perf_pmu__format_bits(pmu, term->config); 1241 break; 1242 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1243 bits = ~(u64)0; 1244 break; 1245 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 1246 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 1247 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 1248 case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG: 1249 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG: 1250 case PARSE_EVENTS__TERM_TYPE_NAME: 1251 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1252 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1253 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1254 case PARSE_EVENTS__TERM_TYPE_TIME: 1255 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1256 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1257 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1258 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1259 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1260 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1261 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1262 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1263 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1264 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1265 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1266 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 1267 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1268 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1269 case PARSE_EVENTS__TERM_TYPE_RAW: 1270 case PARSE_EVENTS__TERM_TYPE_CPU: 1271 case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV: 1272 default: 1273 break; 1274 } 1275 } 1276 1277 if (bits) 1278 ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false); 1279 1280 #undef ADD_CONFIG_TERM 1281 return 0; 1282 } 1283 1284 int parse_events_add_tracepoint(struct parse_events_state *parse_state, 1285 struct list_head *list, 1286 const char *sys, const char *event, 1287 struct parse_events_error *err, 1288 struct parse_events_terms *head_config, void *loc_) 1289 { 1290 YYLTYPE *loc = loc_; 1291 1292 if (head_config) { 1293 struct perf_event_attr attr; 1294 1295 if (config_attr(&attr, head_config, parse_state, config_term_tracepoint)) 1296 return -EINVAL; 1297 } 1298 1299 return add_tracepoint_multi_sys(parse_state, list, sys, event, 1300 err, head_config, loc); 1301 } 1302 1303 static int __parse_events_add_numeric(struct parse_events_state *parse_state, 1304 struct list_head *list, 1305 struct perf_pmu *pmu, u32 type, u32 extended_type, 1306 u64 config, const struct parse_events_terms *head_config, 1307 struct evsel *first_wildcard_match) 1308 { 1309 struct perf_event_attr attr; 1310 LIST_HEAD(config_terms); 1311 const char *name, *metric_id; 1312 struct perf_cpu_map *cpus; 1313 int ret; 1314 1315 memset(&attr, 0, sizeof(attr)); 1316 attr.type = type; 1317 attr.config = config; 1318 if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) { 1319 assert(perf_pmus__supports_extended_type()); 1320 attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT; 1321 } 1322 1323 if (head_config) { 1324 if (config_attr(&attr, head_config, parse_state, config_term_common)) 1325 return -EINVAL; 1326 1327 if (get_config_terms(head_config, &config_terms)) 1328 return -ENOMEM; 1329 } 1330 1331 name = 
get_config_name(head_config); 1332 metric_id = get_config_metric_id(head_config); 1333 cpus = get_config_cpu(head_config, parse_state->fake_pmu); 1334 ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name, 1335 metric_id, pmu, &config_terms, first_wildcard_match, 1336 cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM; 1337 perf_cpu_map__put(cpus); 1338 free_config_terms(&config_terms); 1339 return ret; 1340 } 1341 1342 int parse_events_add_numeric(struct parse_events_state *parse_state, 1343 struct list_head *list, 1344 u32 type, u64 config, 1345 const struct parse_events_terms *head_config, 1346 bool wildcard) 1347 { 1348 struct perf_pmu *pmu = NULL; 1349 bool found_supported = false; 1350 1351 /* Wildcards on numeric values are only supported by core PMUs. */ 1352 if (wildcard && perf_pmus__supports_extended_type()) { 1353 struct evsel *first_wildcard_match = NULL; 1354 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 1355 int ret; 1356 1357 found_supported = true; 1358 if (parse_events__filter_pmu(parse_state, pmu)) 1359 continue; 1360 1361 ret = __parse_events_add_numeric(parse_state, list, pmu, 1362 type, pmu->type, 1363 config, head_config, 1364 first_wildcard_match); 1365 if (ret) 1366 return ret; 1367 if (first_wildcard_match == NULL) 1368 first_wildcard_match = 1369 container_of(list->prev, struct evsel, core.node); 1370 } 1371 if (found_supported) 1372 return 0; 1373 } 1374 return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type), 1375 type, /*extended_type=*/0, config, head_config, 1376 /*first_wildcard_match=*/NULL); 1377 } 1378 1379 static bool config_term_percore(struct list_head *config_terms) 1380 { 1381 struct evsel_config_term *term; 1382 1383 list_for_each_entry(term, config_terms, list) { 1384 if (term->type == EVSEL__CONFIG_TERM_PERCORE) 1385 return term->val.percore; 1386 } 1387 1388 return false; 1389 } 1390 1391 static int parse_events_add_pmu(struct parse_events_state *parse_state, 1392 struct list_head *list, struct perf_pmu *pmu, 1393 const struct parse_events_terms *const_parsed_terms, 1394 struct evsel *first_wildcard_match) 1395 { 1396 u64 alternate_hw_config = PERF_COUNT_HW_MAX; 1397 struct perf_event_attr attr; 1398 struct perf_pmu_info info; 1399 struct evsel *evsel; 1400 struct parse_events_error *err = parse_state->error; 1401 LIST_HEAD(config_terms); 1402 struct parse_events_terms parsed_terms; 1403 bool alias_rewrote_terms = false; 1404 struct perf_cpu_map *term_cpu = NULL; 1405 1406 if (verbose > 1) { 1407 struct strbuf sb; 1408 1409 strbuf_init(&sb, /*hint=*/ 0); 1410 if (pmu->selectable && const_parsed_terms && 1411 list_empty(&const_parsed_terms->terms)) { 1412 strbuf_addf(&sb, "%s//", pmu->name); 1413 } else { 1414 strbuf_addf(&sb, "%s/", pmu->name); 1415 parse_events_terms__to_strbuf(const_parsed_terms, &sb); 1416 strbuf_addch(&sb, '/'); 1417 } 1418 fprintf(stderr, "Attempt to add: %s\n", sb.buf); 1419 strbuf_release(&sb); 1420 } 1421 1422 memset(&attr, 0, sizeof(attr)); 1423 if (pmu->perf_event_attr_init_default) 1424 pmu->perf_event_attr_init_default(pmu, &attr); 1425 1426 attr.type = pmu->type; 1427 1428 if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) { 1429 evsel = __add_event(list, &parse_state->idx, &attr, 1430 /*init_attr=*/true, /*name=*/NULL, 1431 /*metric_id=*/NULL, pmu, 1432 /*config_terms=*/NULL, first_wildcard_match, 1433 /*cpu_list=*/NULL, alternate_hw_config); 1434 return evsel ? 
0 : -ENOMEM; 1435 } 1436 1437 parse_events_terms__init(&parsed_terms); 1438 if (const_parsed_terms) { 1439 int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms); 1440 1441 if (ret) 1442 return ret; 1443 } 1444 fix_raw(&parsed_terms, pmu); 1445 1446 /* Configure attr/terms with a known PMU, this will set hardcoded terms. */ 1447 if (config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) { 1448 parse_events_terms__exit(&parsed_terms); 1449 return -EINVAL; 1450 } 1451 1452 /* Look for event names in the terms and rewrite into format based terms. */ 1453 if (perf_pmu__check_alias(pmu, &parsed_terms, 1454 &info, &alias_rewrote_terms, 1455 &alternate_hw_config, err)) { 1456 parse_events_terms__exit(&parsed_terms); 1457 return -EINVAL; 1458 } 1459 1460 if (verbose > 1) { 1461 struct strbuf sb; 1462 1463 strbuf_init(&sb, /*hint=*/ 0); 1464 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1465 fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf); 1466 strbuf_release(&sb); 1467 } 1468 1469 /* Configure attr/terms again if an alias was expanded. */ 1470 if (alias_rewrote_terms && 1471 config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) { 1472 parse_events_terms__exit(&parsed_terms); 1473 return -EINVAL; 1474 } 1475 1476 if (get_config_terms(&parsed_terms, &config_terms)) { 1477 parse_events_terms__exit(&parsed_terms); 1478 return -ENOMEM; 1479 } 1480 1481 /* 1482 * When using default config, record which bits of attr->config were 1483 * changed by the user. 1484 */ 1485 if (pmu->perf_event_attr_init_default && 1486 get_config_chgs(pmu, &parsed_terms, &config_terms)) { 1487 parse_events_terms__exit(&parsed_terms); 1488 return -ENOMEM; 1489 } 1490 1491 /* Skip configuring hard coded terms that were applied by config_attr. 
*/ 1492 if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false, 1493 parse_state->error)) { 1494 free_config_terms(&config_terms); 1495 parse_events_terms__exit(&parsed_terms); 1496 return -EINVAL; 1497 } 1498 1499 term_cpu = get_config_cpu(&parsed_terms, parse_state->fake_pmu); 1500 evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true, 1501 get_config_name(&parsed_terms), 1502 get_config_metric_id(&parsed_terms), pmu, 1503 &config_terms, first_wildcard_match, term_cpu, alternate_hw_config); 1504 perf_cpu_map__put(term_cpu); 1505 if (!evsel) { 1506 parse_events_terms__exit(&parsed_terms); 1507 return -ENOMEM; 1508 } 1509 1510 if (evsel->name) 1511 evsel->use_config_name = true; 1512 1513 evsel->percore = config_term_percore(&evsel->config_terms); 1514 1515 parse_events_terms__exit(&parsed_terms); 1516 free((char *)evsel->unit); 1517 evsel->unit = strdup(info.unit); 1518 evsel->scale = info.scale; 1519 evsel->per_pkg = info.per_pkg; 1520 evsel->snapshot = info.snapshot; 1521 evsel->retirement_latency.mean = info.retirement_latency_mean; 1522 evsel->retirement_latency.min = info.retirement_latency_min; 1523 evsel->retirement_latency.max = info.retirement_latency_max; 1524 1525 return 0; 1526 } 1527 1528 int parse_events_multi_pmu_add(struct parse_events_state *parse_state, 1529 const char *event_name, 1530 const struct parse_events_terms *const_parsed_terms, 1531 struct list_head **listp, void *loc_) 1532 { 1533 struct parse_events_term *term; 1534 struct list_head *list = NULL; 1535 struct perf_pmu *pmu = NULL; 1536 YYLTYPE *loc = loc_; 1537 int ok = 0; 1538 const char *config; 1539 struct parse_events_terms parsed_terms; 1540 struct evsel *first_wildcard_match = NULL; 1541 1542 *listp = NULL; 1543 1544 parse_events_terms__init(&parsed_terms); 1545 if (const_parsed_terms) { 1546 int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms); 1547 1548 if (ret) 1549 return ret; 1550 } 1551 1552 config = strdup(event_name); 1553 if (!config) 1554 goto out_err; 1555 1556 if (parse_events_term__num(&term, 1557 PARSE_EVENTS__TERM_TYPE_USER, 1558 config, /*num=*/1, /*novalue=*/true, 1559 loc, /*loc_val=*/NULL) < 0) { 1560 zfree(&config); 1561 goto out_err; 1562 } 1563 list_add_tail(&term->list, &parsed_terms.terms); 1564 1565 /* Add it for all PMUs that support the alias */ 1566 list = malloc(sizeof(struct list_head)); 1567 if (!list) 1568 goto out_err; 1569 1570 INIT_LIST_HEAD(list); 1571 1572 while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) { 1573 1574 if (parse_events__filter_pmu(parse_state, pmu)) 1575 continue; 1576 1577 if (!perf_pmu__have_event(pmu, event_name)) 1578 continue; 1579 1580 if (!parse_events_add_pmu(parse_state, list, pmu, 1581 &parsed_terms, first_wildcard_match)) { 1582 struct strbuf sb; 1583 1584 strbuf_init(&sb, /*hint=*/ 0); 1585 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1586 pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf); 1587 strbuf_release(&sb); 1588 ok++; 1589 } 1590 if (first_wildcard_match == NULL) 1591 first_wildcard_match = container_of(list->prev, struct evsel, core.node); 1592 } 1593 1594 if (parse_state->fake_pmu) { 1595 if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms, 1596 first_wildcard_match)) { 1597 struct strbuf sb; 1598 1599 strbuf_init(&sb, /*hint=*/ 0); 1600 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1601 pr_debug("%s -> fake/%s/\n", event_name, sb.buf); 1602 strbuf_release(&sb); 1603 ok++; 1604 } 1605 } 1606 1607 out_err: 1608 
parse_events_terms__exit(&parsed_terms); 1609 if (ok) 1610 *listp = list; 1611 else 1612 free(list); 1613 1614 return ok ? 0 : -1; 1615 } 1616 1617 int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state, 1618 const char *event_or_pmu, 1619 const struct parse_events_terms *const_parsed_terms, 1620 struct list_head **listp, 1621 void *loc_) 1622 { 1623 YYLTYPE *loc = loc_; 1624 struct perf_pmu *pmu; 1625 int ok = 0; 1626 char *help; 1627 struct evsel *first_wildcard_match = NULL; 1628 1629 *listp = malloc(sizeof(**listp)); 1630 if (!*listp) 1631 return -ENOMEM; 1632 1633 INIT_LIST_HEAD(*listp); 1634 1635 /* Attempt to add to list assuming event_or_pmu is a PMU name. */ 1636 pmu = perf_pmus__find(event_or_pmu); 1637 if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms, 1638 first_wildcard_match)) 1639 return 0; 1640 1641 if (parse_state->fake_pmu) { 1642 if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(), 1643 const_parsed_terms, 1644 first_wildcard_match)) 1645 return 0; 1646 } 1647 1648 pmu = NULL; 1649 /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */ 1650 while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) { 1651 1652 if (parse_events__filter_pmu(parse_state, pmu)) 1653 continue; 1654 1655 if (!parse_events_add_pmu(parse_state, *listp, pmu, 1656 const_parsed_terms, 1657 first_wildcard_match)) { 1658 ok++; 1659 parse_state->wild_card_pmus = true; 1660 } 1661 if (first_wildcard_match == NULL) { 1662 first_wildcard_match = 1663 container_of((*listp)->prev, struct evsel, core.node); 1664 } 1665 } 1666 if (ok) 1667 return 0; 1668 1669 /* Failure to add, assume event_or_pmu is an event name. */ 1670 zfree(listp); 1671 if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, 1672 const_parsed_terms, listp, loc)) 1673 return 0; 1674 1675 if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0) 1676 help = NULL; 1677 parse_events_error__handle(parse_state->error, loc->first_column, 1678 strdup("Bad event or PMU"), 1679 help); 1680 zfree(listp); 1681 return -EINVAL; 1682 } 1683 1684 void parse_events__set_leader(char *name, struct list_head *list) 1685 { 1686 struct evsel *leader; 1687 1688 if (list_empty(list)) { 1689 WARN_ONCE(true, "WARNING: failed to set leader: empty list"); 1690 return; 1691 } 1692 1693 leader = list_first_entry(list, struct evsel, core.node); 1694 __perf_evlist__set_leader(list, &leader->core); 1695 zfree(&leader->group_name); 1696 leader->group_name = name; 1697 } 1698 1699 static int parse_events__modifier_list(struct parse_events_state *parse_state, 1700 YYLTYPE *loc, 1701 struct list_head *list, 1702 struct parse_events_modifier mod, 1703 bool group) 1704 { 1705 struct evsel *evsel; 1706 1707 if (!group && mod.weak) { 1708 parse_events_error__handle(parse_state->error, loc->first_column, 1709 strdup("Weak modifier is for use with groups"), NULL); 1710 return -EINVAL; 1711 } 1712 1713 __evlist__for_each_entry(list, evsel) { 1714 /* Translate modifiers into the equivalent evsel excludes. */ 1715 int eu = group ? evsel->core.attr.exclude_user : 0; 1716 int ek = group ? evsel->core.attr.exclude_kernel : 0; 1717 int eh = group ? evsel->core.attr.exclude_hv : 0; 1718 int eH = group ? evsel->core.attr.exclude_host : 0; 1719 int eG = group ? 
evsel->core.attr.exclude_guest : 0; 1720 int exclude = eu | ek | eh; 1721 int exclude_GH = eG | eH; 1722 1723 if (mod.user) { 1724 if (!exclude) 1725 exclude = eu = ek = eh = 1; 1726 eu = 0; 1727 } 1728 if (mod.kernel) { 1729 if (!exclude) 1730 exclude = eu = ek = eh = 1; 1731 ek = 0; 1732 } 1733 if (mod.hypervisor) { 1734 if (!exclude) 1735 exclude = eu = ek = eh = 1; 1736 eh = 0; 1737 } 1738 if (mod.guest) { 1739 if (!exclude_GH) 1740 exclude_GH = eG = eH = 1; 1741 eG = 0; 1742 } 1743 if (mod.host) { 1744 if (!exclude_GH) 1745 exclude_GH = eG = eH = 1; 1746 eH = 0; 1747 } 1748 if (!exclude_GH && exclude_GH_default) { 1749 if (perf_host) 1750 eG = 1; 1751 else if (perf_guest) 1752 eH = 1; 1753 } 1754 1755 evsel->core.attr.exclude_user = eu; 1756 evsel->core.attr.exclude_kernel = ek; 1757 evsel->core.attr.exclude_hv = eh; 1758 evsel->core.attr.exclude_host = eH; 1759 evsel->core.attr.exclude_guest = eG; 1760 evsel->exclude_GH = exclude_GH; 1761 1762 /* Simple modifiers copied to the evsel. */ 1763 if (mod.precise) { 1764 u8 precise = evsel->core.attr.precise_ip + mod.precise; 1765 /* 1766 * precise ip: 1767 * 1768 * 0 - SAMPLE_IP can have arbitrary skid 1769 * 1 - SAMPLE_IP must have constant skid 1770 * 2 - SAMPLE_IP requested to have 0 skid 1771 * 3 - SAMPLE_IP must have 0 skid 1772 * 1773 * See also PERF_RECORD_MISC_EXACT_IP 1774 */ 1775 if (precise > 3) { 1776 char *help; 1777 1778 if (asprintf(&help, 1779 "Maximum combined precise value is 3, adding precision to \"%s\"", 1780 evsel__name(evsel)) > 0) { 1781 parse_events_error__handle(parse_state->error, 1782 loc->first_column, 1783 help, NULL); 1784 } 1785 return -EINVAL; 1786 } 1787 evsel->core.attr.precise_ip = precise; 1788 } 1789 if (mod.precise_max) 1790 evsel->precise_max = 1; 1791 if (mod.non_idle) 1792 evsel->core.attr.exclude_idle = 1; 1793 if (mod.sample_read) 1794 evsel->sample_read = 1; 1795 if (mod.pinned && evsel__is_group_leader(evsel)) 1796 evsel->core.attr.pinned = 1; 1797 if (mod.exclusive && evsel__is_group_leader(evsel)) 1798 evsel->core.attr.exclusive = 1; 1799 if (mod.weak) 1800 evsel->weak_group = true; 1801 if (mod.bpf) 1802 evsel->bpf_counter = true; 1803 if (mod.retire_lat) 1804 evsel->retire_lat = true; 1805 if (mod.dont_regroup) 1806 evsel->dont_regroup = true; 1807 } 1808 return 0; 1809 } 1810 1811 int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc, 1812 struct list_head *list, 1813 struct parse_events_modifier mod) 1814 { 1815 return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true); 1816 } 1817 1818 int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc, 1819 struct list_head *list, 1820 struct parse_events_modifier mod) 1821 { 1822 return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false); 1823 } 1824 1825 int parse_events__set_default_name(struct list_head *list, char *name) 1826 { 1827 struct evsel *evsel; 1828 bool used_name = false; 1829 1830 __evlist__for_each_entry(list, evsel) { 1831 if (!evsel->name) { 1832 evsel->name = used_name ? 
strdup(name) : name; 1833 used_name = true; 1834 if (!evsel->name) 1835 return -ENOMEM; 1836 } 1837 } 1838 if (!used_name) 1839 free(name); 1840 return 0; 1841 } 1842 1843 static int parse_events__scanner(const char *str, 1844 struct parse_events_state *parse_state) 1845 { 1846 YY_BUFFER_STATE buffer; 1847 void *scanner; 1848 int ret; 1849 1850 ret = parse_events_lex_init_extra(parse_state, &scanner); 1851 if (ret) 1852 return ret; 1853 1854 buffer = parse_events__scan_string(str, scanner); 1855 1856 #ifdef PARSER_DEBUG 1857 parse_events_debug = 1; 1858 parse_events_set_debug(1, scanner); 1859 #endif 1860 ret = parse_events_parse(parse_state, scanner); 1861 1862 parse_events__flush_buffer(buffer, scanner); 1863 parse_events__delete_buffer(buffer, scanner); 1864 parse_events_lex_destroy(scanner); 1865 return ret; 1866 } 1867 1868 /* 1869 * parse event config string, return a list of event terms. 1870 */ 1871 int parse_events_terms(struct parse_events_terms *terms, const char *str) 1872 { 1873 struct parse_events_state parse_state = { 1874 .terms = NULL, 1875 .stoken = PE_START_TERMS, 1876 }; 1877 int ret; 1878 1879 ret = parse_events__scanner(str, &parse_state); 1880 if (!ret) 1881 list_splice(&parse_state.terms->terms, &terms->terms); 1882 1883 zfree(&parse_state.terms); 1884 return ret; 1885 } 1886 1887 static int evsel__compute_group_pmu_name(struct evsel *evsel, 1888 const struct list_head *head) 1889 { 1890 struct evsel *leader = evsel__leader(evsel); 1891 struct evsel *pos; 1892 const char *group_pmu_name; 1893 struct perf_pmu *pmu = evsel__find_pmu(evsel); 1894 1895 if (!pmu) { 1896 /* 1897 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU 1898 * is a core PMU, but in heterogeneous systems this is 1899 * unknown. For now pick the first core PMU. 1900 */ 1901 pmu = perf_pmus__scan_core(NULL); 1902 } 1903 if (!pmu) { 1904 pr_debug("No PMU found for '%s'\n", evsel__name(evsel)); 1905 return -EINVAL; 1906 } 1907 group_pmu_name = pmu->name; 1908 /* 1909 * Software events may be in a group with other uncore PMU events. Use 1910 * the pmu_name of the first non-software event to avoid breaking the 1911 * software event out of the group. 1912 * 1913 * Aux event leaders, like intel_pt, expect a group with events from 1914 * other PMUs, so substitute the AUX event's PMU in this case. 1915 */ 1916 if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) { 1917 struct perf_pmu *leader_pmu = evsel__find_pmu(leader); 1918 1919 if (!leader_pmu) { 1920 /* As with determining pmu above. */ 1921 leader_pmu = perf_pmus__scan_core(NULL); 1922 } 1923 /* 1924 * Starting with the leader, find the first event with a named 1925 * non-software PMU. for_each_group_(member|evsel) isn't used as 1926 * the list isn't yet sorted putting evsel's in the same group 1927 * together. 1928 */ 1929 if (leader_pmu && !perf_pmu__is_software(leader_pmu)) { 1930 group_pmu_name = leader_pmu->name; 1931 } else if (leader->core.nr_members > 1) { 1932 list_for_each_entry(pos, head, core.node) { 1933 struct perf_pmu *pos_pmu; 1934 1935 if (pos == leader || evsel__leader(pos) != leader) 1936 continue; 1937 pos_pmu = evsel__find_pmu(pos); 1938 if (!pos_pmu) { 1939 /* As with determining pmu above. */ 1940 pos_pmu = perf_pmus__scan_core(NULL); 1941 } 1942 if (pos_pmu && !perf_pmu__is_software(pos_pmu)) { 1943 group_pmu_name = pos_pmu->name; 1944 break; 1945 } 1946 } 1947 } 1948 } 1949 /* Record computed name. */ 1950 evsel->group_pmu_name = strdup(group_pmu_name); 1951 return evsel->group_pmu_name ? 
0 : -ENOMEM; 1952 } 1953 1954 __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs) 1955 { 1956 /* Order by insertion index. */ 1957 return lhs->core.idx - rhs->core.idx; 1958 } 1959 1960 static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r) 1961 { 1962 const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node); 1963 const struct evsel *lhs = container_of(lhs_core, struct evsel, core); 1964 const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node); 1965 const struct evsel *rhs = container_of(rhs_core, struct evsel, core); 1966 int *force_grouped_idx = _fg_idx; 1967 int lhs_sort_idx, rhs_sort_idx, ret; 1968 const char *lhs_pmu_name, *rhs_pmu_name; 1969 1970 /* 1971 * Get the indexes of the 2 events to sort. If the events are 1972 * in groups then the leader's index is used otherwise the 1973 * event's index is used. An index may be forced for events that 1974 * must be in the same group, namely Intel topdown events. 1975 */ 1976 if (lhs->dont_regroup) { 1977 lhs_sort_idx = lhs_core->idx; 1978 } else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) { 1979 lhs_sort_idx = *force_grouped_idx; 1980 } else { 1981 bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1; 1982 1983 lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx; 1984 } 1985 if (rhs->dont_regroup) { 1986 rhs_sort_idx = rhs_core->idx; 1987 } else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) { 1988 rhs_sort_idx = *force_grouped_idx; 1989 } else { 1990 bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1; 1991 1992 rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx; 1993 } 1994 1995 /* If the indices differ then respect the insertion order. */ 1996 if (lhs_sort_idx != rhs_sort_idx) 1997 return lhs_sort_idx - rhs_sort_idx; 1998 1999 /* 2000 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should 2001 * be in the same group. Events in the same group need to be ordered by 2002 * their grouping PMU name as the group will be broken to ensure only 2003 * events on the same PMU are programmed together. 2004 * 2005 * With forcing the lhs_sort_idx == rhs_sort_idx shows that one or both 2006 * events are being forced to be at force_group_index. If only one event 2007 * is being forced then the other event is the group leader of the group 2008 * we're trying to force the event into. Ensure for the force grouped 2009 * case that the PMU name ordering is also respected. 2010 */ 2011 lhs_pmu_name = lhs->group_pmu_name; 2012 rhs_pmu_name = rhs->group_pmu_name; 2013 ret = strcmp(lhs_pmu_name, rhs_pmu_name); 2014 if (ret) 2015 return ret; 2016 2017 /* 2018 * Architecture specific sorting, by default sort events in the same 2019 * group with the same PMU by their insertion index. On Intel topdown 2020 * constraints must be adhered to - slots first, etc. 
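 * The ordering is delegated to arch_evlist__cmp(): the __weak default above
 * keeps insertion order, while an architecture needing more (x86 topdown,
 * for instance) supplies its own implementation.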
2021 */ 2022 return arch_evlist__cmp(lhs, rhs); 2023 } 2024 2025 int __weak arch_evlist__add_required_events(struct list_head *list __always_unused) 2026 { 2027 return 0; 2028 } 2029 2030 static int parse_events__sort_events_and_fix_groups(struct list_head *list) 2031 { 2032 int idx = 0, force_grouped_idx = -1; 2033 struct evsel *pos, *cur_leader = NULL; 2034 struct perf_evsel *cur_leaders_grp = NULL; 2035 bool idx_changed = false; 2036 int orig_num_leaders = 0, num_leaders = 0; 2037 int ret; 2038 struct evsel *force_grouped_leader = NULL; 2039 bool last_event_was_forced_leader = false; 2040 2041 /* On x86 topdown metrics events require a slots event. */ 2042 ret = arch_evlist__add_required_events(list); 2043 if (ret) 2044 return ret; 2045 2046 /* 2047 * Compute index to insert ungrouped events at. Place them where the 2048 * first ungrouped event appears. 2049 */ 2050 list_for_each_entry(pos, list, core.node) { 2051 const struct evsel *pos_leader = evsel__leader(pos); 2052 2053 ret = evsel__compute_group_pmu_name(pos, list); 2054 if (ret) 2055 return ret; 2056 2057 if (pos == pos_leader) 2058 orig_num_leaders++; 2059 2060 /* 2061 * Ensure indexes are sequential, in particular for multiple 2062 * event lists being merged. The indexes are used to detect when 2063 * the user order is modified. 2064 */ 2065 pos->core.idx = idx++; 2066 2067 /* 2068 * Remember an index to sort all forced grouped events 2069 * together to. Use the group leader as some events 2070 * must appear first within the group. 2071 */ 2072 if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos)) 2073 force_grouped_idx = pos_leader->core.idx; 2074 } 2075 2076 /* Sort events. */ 2077 list_sort(&force_grouped_idx, list, evlist__cmp); 2078 2079 /* 2080 * Recompute groups, splitting for PMUs and adding groups for events 2081 * that require them. 2082 */ 2083 idx = 0; 2084 list_for_each_entry(pos, list, core.node) { 2085 struct evsel *pos_leader = evsel__leader(pos); 2086 const char *pos_pmu_name = pos->group_pmu_name; 2087 const char *cur_leader_pmu_name; 2088 bool pos_force_grouped = force_grouped_idx != -1 && !pos->dont_regroup && 2089 arch_evsel__must_be_in_group(pos); 2090 2091 /* Reset index and nr_members. */ 2092 if (pos->core.idx != idx) 2093 idx_changed = true; 2094 pos->core.idx = idx++; 2095 pos->core.nr_members = 0; 2096 2097 /* 2098 * Set the group leader respecting the given groupings and that 2099 * groups can't span PMUs. 2100 */ 2101 if (!cur_leader || pos->dont_regroup) { 2102 cur_leader = pos->dont_regroup ? pos_leader : pos; 2103 cur_leaders_grp = &cur_leader->core; 2104 if (pos_force_grouped) 2105 force_grouped_leader = pos; 2106 } 2107 cur_leader_pmu_name = cur_leader->group_pmu_name; 2108 if (strcmp(cur_leader_pmu_name, pos_pmu_name)) { 2109 /* PMU changed so the group/leader must change. */ 2110 cur_leader = pos; 2111 cur_leaders_grp = pos->core.leader; 2112 if (pos_force_grouped && force_grouped_leader == NULL) 2113 force_grouped_leader = pos; 2114 } else if (cur_leaders_grp != pos->core.leader) { 2115 bool split_even_if_last_leader_was_forced = true; 2116 2117 /* 2118 * Event is for a different group. If the last event was 2119 * the forced group leader then subsequent group events 2120 * and forced events should be in the same group. If 2121 * there are no other forced group events then the 2122 * forced group leader wasn't really being forced into a 2123 * group, it just set arch_evsel__must_be_in_group, and 2124 * we don't want the group to split here. 
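 * The search below checks whether pos's original group contains another
 * member that must also be force grouped; only when no such member exists
 * is the group allowed to split here.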
2125 */ 2126 if (force_grouped_idx != -1 && last_event_was_forced_leader) { 2127 struct evsel *pos2 = pos; 2128 /* 2129 * Search the whole list as the group leaders 2130 * aren't currently valid. 2131 */ 2132 list_for_each_entry_continue(pos2, list, core.node) { 2133 if (pos->core.leader == pos2->core.leader && 2134 arch_evsel__must_be_in_group(pos2)) { 2135 split_even_if_last_leader_was_forced = false; 2136 break; 2137 } 2138 } 2139 } 2140 if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) { 2141 if (pos_force_grouped) { 2142 if (force_grouped_leader) { 2143 cur_leader = force_grouped_leader; 2144 cur_leaders_grp = force_grouped_leader->core.leader; 2145 } else { 2146 cur_leader = force_grouped_leader = pos; 2147 cur_leaders_grp = &pos->core; 2148 } 2149 } else { 2150 cur_leader = pos; 2151 cur_leaders_grp = pos->core.leader; 2152 } 2153 } 2154 } 2155 if (pos_leader != cur_leader) { 2156 /* The leader changed so update it. */ 2157 evsel__set_leader(pos, cur_leader); 2158 } 2159 last_event_was_forced_leader = (force_grouped_leader == pos); 2160 } 2161 list_for_each_entry(pos, list, core.node) { 2162 struct evsel *pos_leader = evsel__leader(pos); 2163 2164 if (pos == pos_leader) 2165 num_leaders++; 2166 pos_leader->core.nr_members++; 2167 } 2168 return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0; 2169 } 2170 2171 int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter, 2172 struct parse_events_error *err, bool fake_pmu, 2173 bool warn_if_reordered, bool fake_tp) 2174 { 2175 struct parse_events_state parse_state = { 2176 .list = LIST_HEAD_INIT(parse_state.list), 2177 .idx = evlist->core.nr_entries, 2178 .error = err, 2179 .stoken = PE_START_EVENTS, 2180 .fake_pmu = fake_pmu, 2181 .fake_tp = fake_tp, 2182 .pmu_filter = pmu_filter, 2183 .match_legacy_cache_terms = true, 2184 }; 2185 int ret, ret2; 2186 2187 ret = parse_events__scanner(str, &parse_state); 2188 2189 if (!ret && list_empty(&parse_state.list)) { 2190 WARN_ONCE(true, "WARNING: event parser found nothing\n"); 2191 return -1; 2192 } 2193 2194 ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list); 2195 if (ret2 < 0) 2196 return ret; 2197 2198 /* 2199 * Add list to the evlist even with errors to allow callers to clean up. 2200 */ 2201 evlist__splice_list_tail(evlist, &parse_state.list); 2202 2203 if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) { 2204 pr_warning("WARNING: events were regrouped to match PMUs\n"); 2205 2206 if (verbose > 0) { 2207 struct strbuf sb = STRBUF_INIT; 2208 2209 evlist__uniquify_evsel_names(evlist, &stat_config); 2210 evlist__format_evsels(evlist, &sb, 2048); 2211 pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf); 2212 strbuf_release(&sb); 2213 } 2214 } 2215 if (!ret) { 2216 struct evsel *last; 2217 2218 last = evlist__last(evlist); 2219 last->cmdline_group_boundary = true; 2220 2221 return 0; 2222 } 2223 2224 /* 2225 * There are 2 users - builtin-record and builtin-test objects. 2226 * Both call evlist__delete in case of error, so we dont 2227 * need to bother. 2228 */ 2229 return ret; 2230 } 2231 2232 int parse_event(struct evlist *evlist, const char *str) 2233 { 2234 struct parse_events_error err; 2235 int ret; 2236 2237 parse_events_error__init(&err); 2238 ret = parse_events(evlist, str, &err); 2239 if (ret && verbose > 0) 2240 parse_events_error__print(&err, str); 2241 parse_events_error__exit(&err); 2242 return ret; 2243 } 2244 2245 struct parse_events_error_entry { 2246 /** @list: The list the error is part of. 
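 * Entries are added by parse_events_error__handle() and freed again by
 * parse_events_error__exit().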
*/ 2247 struct list_head list; 2248 /** @idx: index in the parsed string */ 2249 int idx; 2250 /** @str: string to display at the index */ 2251 char *str; 2252 /** @help: optional help string */ 2253 char *help; 2254 }; 2255 2256 void parse_events_error__init(struct parse_events_error *err) 2257 { 2258 INIT_LIST_HEAD(&err->list); 2259 } 2260 2261 void parse_events_error__exit(struct parse_events_error *err) 2262 { 2263 struct parse_events_error_entry *pos, *tmp; 2264 2265 list_for_each_entry_safe(pos, tmp, &err->list, list) { 2266 zfree(&pos->str); 2267 zfree(&pos->help); 2268 list_del_init(&pos->list); 2269 free(pos); 2270 } 2271 } 2272 2273 void parse_events_error__handle(struct parse_events_error *err, int idx, 2274 char *str, char *help) 2275 { 2276 struct parse_events_error_entry *entry; 2277 2278 if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n")) 2279 goto out_free; 2280 2281 entry = zalloc(sizeof(*entry)); 2282 if (!entry) { 2283 pr_err("Failed to allocate memory for event parsing error: %s (%s)\n", 2284 str, help ?: "<no help>"); 2285 goto out_free; 2286 } 2287 entry->idx = idx; 2288 entry->str = str; 2289 entry->help = help; 2290 list_add(&entry->list, &err->list); 2291 return; 2292 out_free: 2293 free(str); 2294 free(help); 2295 } 2296 2297 #define MAX_WIDTH 1000 2298 static int get_term_width(void) 2299 { 2300 struct winsize ws; 2301 2302 get_term_dimensions(&ws); 2303 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col; 2304 } 2305 2306 static void __parse_events_error__print(int err_idx, const char *err_str, 2307 const char *err_help, const char *event) 2308 { 2309 const char *str = "invalid or unsupported event: "; 2310 char _buf[MAX_WIDTH]; 2311 char *buf = (char *) event; 2312 int idx = 0; 2313 if (err_str) { 2314 /* -2 for extra '' in the final fprintf */ 2315 int width = get_term_width() - 2; 2316 int len_event = strlen(event); 2317 int len_str, max_len, cut = 0; 2318 2319 /* 2320 * Maximum error index indent, we will cut 2321 * the event string if it's bigger. 2322 */ 2323 int max_err_idx = 13; 2324 2325 /* 2326 * Let's be specific with the message when 2327 * we have the precise error. 2328 */ 2329 str = "event syntax error: "; 2330 len_str = strlen(str); 2331 max_len = width - len_str; 2332 2333 buf = _buf; 2334 2335 /* We're cutting from the beginning. */ 2336 if (err_idx > max_err_idx) 2337 cut = err_idx - max_err_idx; 2338 2339 strncpy(buf, event + cut, max_len); 2340 2341 /* Mark cut parts with '..' on both sides. */ 2342 if (cut) 2343 buf[0] = buf[1] = '.'; 2344 2345 if ((len_event - cut) > max_len) { 2346 buf[max_len - 1] = buf[max_len - 2] = '.'; 2347 buf[max_len] = 0; 2348 } 2349 2350 idx = len_str + err_idx - cut; 2351 } 2352 2353 fprintf(stderr, "%s'%s'\n", str, buf); 2354 if (idx) { 2355 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str); 2356 if (err_help) 2357 fprintf(stderr, "\n%s\n", err_help); 2358 } 2359 } 2360 2361 void parse_events_error__print(const struct parse_events_error *err, 2362 const char *event) 2363 { 2364 struct parse_events_error_entry *pos; 2365 bool first = true; 2366 2367 list_for_each_entry(pos, &err->list, list) { 2368 if (!first) 2369 fputs("\n", stderr); 2370 __parse_events_error__print(pos->idx, pos->str, pos->help, event); 2371 first = false; 2372 } 2373 } 2374 2375 /* 2376 * In the list of errors err, do any of the error strings (str) contain the 2377 * given needle string? 
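 * This lets callers check for a particular failure message without walking
 * the error entries themselves.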
2378 */ 2379 bool parse_events_error__contains(const struct parse_events_error *err, 2380 const char *needle) 2381 { 2382 struct parse_events_error_entry *pos; 2383 2384 list_for_each_entry(pos, &err->list, list) { 2385 if (strstr(pos->str, needle) != NULL) 2386 return true; 2387 } 2388 return false; 2389 } 2390 2391 #undef MAX_WIDTH 2392 2393 int parse_events_option(const struct option *opt, const char *str, 2394 int unset __maybe_unused) 2395 { 2396 struct parse_events_option_args *args = opt->value; 2397 struct parse_events_error err; 2398 int ret; 2399 2400 parse_events_error__init(&err); 2401 ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err, 2402 /*fake_pmu=*/false, /*warn_if_reordered=*/true, 2403 /*fake_tp=*/false); 2404 2405 if (ret) { 2406 parse_events_error__print(&err, str); 2407 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 2408 } 2409 parse_events_error__exit(&err); 2410 2411 return ret; 2412 } 2413 2414 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset) 2415 { 2416 struct parse_events_option_args *args = opt->value; 2417 int ret; 2418 2419 if (*args->evlistp == NULL) { 2420 *args->evlistp = evlist__new(); 2421 2422 if (*args->evlistp == NULL) { 2423 fprintf(stderr, "Not enough memory to create evlist\n"); 2424 return -1; 2425 } 2426 } 2427 ret = parse_events_option(opt, str, unset); 2428 if (ret) { 2429 evlist__delete(*args->evlistp); 2430 *args->evlistp = NULL; 2431 } 2432 2433 return ret; 2434 } 2435 2436 static int 2437 foreach_evsel_in_last_glob(struct evlist *evlist, 2438 int (*func)(struct evsel *evsel, 2439 const void *arg), 2440 const void *arg) 2441 { 2442 struct evsel *last = NULL; 2443 int err; 2444 2445 /* 2446 * Don't return when list_empty, give func a chance to report 2447 * error when it found last == NULL. 2448 * 2449 * So no need to WARN here, let *func do this. 2450 */ 2451 if (evlist->core.nr_entries > 0) 2452 last = evlist__last(evlist); 2453 2454 do { 2455 err = (*func)(last, arg); 2456 if (err) 2457 return -1; 2458 if (!last) 2459 return 0; 2460 2461 if (last->core.node.prev == &evlist->core.entries) 2462 return 0; 2463 last = list_entry(last->core.node.prev, struct evsel, core.node); 2464 } while (!last->cmdline_group_boundary); 2465 2466 return 0; 2467 } 2468 2469 /* Will a tracepoint filter work for str or should a BPF filter be used? 
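 * Tracepoint filters can only test the event's own fields, not the current
 * user, so a filter string mentioning "uid" is handled with a BPF filter
 * instead (see parse_uid_filter() below).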
*/ 2470 static bool is_possible_tp_filter(const char *str) 2471 { 2472 return strstr(str, "uid") == NULL; 2473 } 2474 2475 static int set_filter(struct evsel *evsel, const void *arg) 2476 { 2477 const char *str = arg; 2478 int nr_addr_filters = 0; 2479 struct perf_pmu *pmu; 2480 2481 if (evsel == NULL) { 2482 fprintf(stderr, 2483 "--filter option should follow a -e tracepoint or HW tracer option\n"); 2484 return -1; 2485 } 2486 2487 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) { 2488 if (evsel__append_tp_filter(evsel, str) < 0) { 2489 fprintf(stderr, 2490 "not enough memory to hold filter string\n"); 2491 return -1; 2492 } 2493 2494 return 0; 2495 } 2496 2497 pmu = evsel__find_pmu(evsel); 2498 if (pmu) { 2499 perf_pmu__scan_file(pmu, "nr_addr_filters", 2500 "%d", &nr_addr_filters); 2501 } 2502 if (!nr_addr_filters) 2503 return perf_bpf_filter__parse(&evsel->bpf_filters, str); 2504 2505 if (evsel__append_addr_filter(evsel, str) < 0) { 2506 fprintf(stderr, 2507 "not enough memory to hold filter string\n"); 2508 return -1; 2509 } 2510 2511 return 0; 2512 } 2513 2514 int parse_filter(const struct option *opt, const char *str, 2515 int unset __maybe_unused) 2516 { 2517 struct evlist *evlist = *(struct evlist **)opt->value; 2518 2519 return foreach_evsel_in_last_glob(evlist, set_filter, 2520 (const void *)str); 2521 } 2522 2523 int parse_uid_filter(struct evlist *evlist, uid_t uid) 2524 { 2525 struct option opt = { 2526 .value = &evlist, 2527 }; 2528 char buf[128]; 2529 int ret; 2530 2531 snprintf(buf, sizeof(buf), "uid == %d", uid); 2532 ret = parse_filter(&opt, buf, /*unset=*/0); 2533 if (ret) { 2534 if (use_browser >= 1) { 2535 /* 2536 * Use ui__warning so a pop up appears above the 2537 * underlying BPF error message. 
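 * The specific cause has already been reported by the BPF filter code; this
 * message only summarises which feature failed.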
2538 */ 2539 ui__warning("Failed to add UID filtering that uses BPF filtering.\n"); 2540 } else { 2541 fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n"); 2542 } 2543 } 2544 return ret; 2545 } 2546 2547 static int add_exclude_perf_filter(struct evsel *evsel, 2548 const void *arg __maybe_unused) 2549 { 2550 char new_filter[64]; 2551 2552 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2553 fprintf(stderr, 2554 "--exclude-perf option should follow a -e tracepoint option\n"); 2555 return -1; 2556 } 2557 2558 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); 2559 2560 if (evsel__append_tp_filter(evsel, new_filter) < 0) { 2561 fprintf(stderr, 2562 "not enough memory to hold filter string\n"); 2563 return -1; 2564 } 2565 2566 return 0; 2567 } 2568 2569 int exclude_perf(const struct option *opt, 2570 const char *arg __maybe_unused, 2571 int unset __maybe_unused) 2572 { 2573 struct evlist *evlist = *(struct evlist **)opt->value; 2574 2575 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, 2576 NULL); 2577 } 2578 2579 int parse_events__is_hardcoded_term(struct parse_events_term *term) 2580 { 2581 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 2582 } 2583 2584 static int new_term(struct parse_events_term **_term, 2585 struct parse_events_term *temp, 2586 char *str, u64 num) 2587 { 2588 struct parse_events_term *term; 2589 2590 term = malloc(sizeof(*term)); 2591 if (!term) 2592 return -ENOMEM; 2593 2594 *term = *temp; 2595 INIT_LIST_HEAD(&term->list); 2596 term->weak = false; 2597 2598 switch (term->type_val) { 2599 case PARSE_EVENTS__TERM_TYPE_NUM: 2600 term->val.num = num; 2601 break; 2602 case PARSE_EVENTS__TERM_TYPE_STR: 2603 term->val.str = str; 2604 break; 2605 default: 2606 free(term); 2607 return -EINVAL; 2608 } 2609 2610 *_term = term; 2611 return 0; 2612 } 2613 2614 int parse_events_term__num(struct parse_events_term **term, 2615 enum parse_events__term_type type_term, 2616 const char *config, u64 num, 2617 bool no_value, 2618 void *loc_term_, void *loc_val_) 2619 { 2620 YYLTYPE *loc_term = loc_term_; 2621 YYLTYPE *loc_val = loc_val_; 2622 2623 struct parse_events_term temp = { 2624 .type_val = PARSE_EVENTS__TERM_TYPE_NUM, 2625 .type_term = type_term, 2626 .config = config ? : strdup(parse_events__term_type_str(type_term)), 2627 .no_value = no_value, 2628 .err_term = loc_term ? loc_term->first_column : 0, 2629 .err_val = loc_val ? loc_val->first_column : 0, 2630 }; 2631 2632 return new_term(term, &temp, /*str=*/NULL, num); 2633 } 2634 2635 int parse_events_term__str(struct parse_events_term **term, 2636 enum parse_events__term_type type_term, 2637 char *config, char *str, 2638 void *loc_term_, void *loc_val_) 2639 { 2640 YYLTYPE *loc_term = loc_term_; 2641 YYLTYPE *loc_val = loc_val_; 2642 2643 struct parse_events_term temp = { 2644 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2645 .type_term = type_term, 2646 .config = config, 2647 .err_term = loc_term ? loc_term->first_column : 0, 2648 .err_val = loc_val ? 
loc_val->first_column : 0, 2649 }; 2650 2651 return new_term(term, &temp, str, /*num=*/0); 2652 } 2653 2654 int parse_events_term__term(struct parse_events_term **term, 2655 enum parse_events__term_type term_lhs, 2656 enum parse_events__term_type term_rhs, 2657 void *loc_term, void *loc_val) 2658 { 2659 return parse_events_term__str(term, term_lhs, NULL, 2660 strdup(parse_events__term_type_str(term_rhs)), 2661 loc_term, loc_val); 2662 } 2663 2664 int parse_events_term__clone(struct parse_events_term **new, 2665 const struct parse_events_term *term) 2666 { 2667 char *str; 2668 struct parse_events_term temp = *term; 2669 2670 temp.used = false; 2671 if (term->config) { 2672 temp.config = strdup(term->config); 2673 if (!temp.config) 2674 return -ENOMEM; 2675 } 2676 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) 2677 return new_term(new, &temp, /*str=*/NULL, term->val.num); 2678 2679 str = strdup(term->val.str); 2680 if (!str) { 2681 zfree(&temp.config); 2682 return -ENOMEM; 2683 } 2684 return new_term(new, &temp, str, /*num=*/0); 2685 } 2686 2687 void parse_events_term__delete(struct parse_events_term *term) 2688 { 2689 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) 2690 zfree(&term->val.str); 2691 2692 zfree(&term->config); 2693 free(term); 2694 } 2695 2696 static int parse_events_terms__copy(const struct parse_events_terms *src, 2697 struct parse_events_terms *dest) 2698 { 2699 struct parse_events_term *term; 2700 2701 list_for_each_entry (term, &src->terms, list) { 2702 struct parse_events_term *n; 2703 int ret; 2704 2705 ret = parse_events_term__clone(&n, term); 2706 if (ret) 2707 return ret; 2708 2709 list_add_tail(&n->list, &dest->terms); 2710 } 2711 return 0; 2712 } 2713 2714 void parse_events_terms__init(struct parse_events_terms *terms) 2715 { 2716 INIT_LIST_HEAD(&terms->terms); 2717 } 2718 2719 void parse_events_terms__exit(struct parse_events_terms *terms) 2720 { 2721 struct parse_events_term *term, *h; 2722 2723 list_for_each_entry_safe(term, h, &terms->terms, list) { 2724 list_del_init(&term->list); 2725 parse_events_term__delete(term); 2726 } 2727 } 2728 2729 void parse_events_terms__delete(struct parse_events_terms *terms) 2730 { 2731 if (!terms) 2732 return; 2733 parse_events_terms__exit(terms); 2734 free(terms); 2735 } 2736 2737 static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb) 2738 { 2739 struct parse_events_term *term; 2740 bool first = true; 2741 2742 if (!terms) 2743 return 0; 2744 2745 list_for_each_entry(term, &terms->terms, list) { 2746 int ret; 2747 2748 if (!first) { 2749 ret = strbuf_addch(sb, ','); 2750 if (ret < 0) 2751 return ret; 2752 } 2753 first = false; 2754 2755 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) 2756 if (term->no_value) { 2757 assert(term->val.num == 1); 2758 ret = strbuf_addf(sb, "%s", term->config); 2759 } else 2760 ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num); 2761 else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) { 2762 if (term->config) { 2763 ret = strbuf_addf(sb, "%s=", term->config); 2764 if (ret < 0) 2765 return ret; 2766 } else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) { 2767 ret = strbuf_addf(sb, "%s=", 2768 parse_events__term_type_str(term->type_term)); 2769 if (ret < 0) 2770 return ret; 2771 } 2772 assert(!term->no_value); 2773 ret = strbuf_addf(sb, "%s", term->val.str); 2774 } 2775 if (ret < 0) 2776 return ret; 2777 } 2778 return 0; 2779 } 2780 2781 static void config_terms_list(char *buf, size_t buf_sz) 2782 { 2783 int 
i; 2784 bool first = true; 2785 2786 buf[0] = '\0'; 2787 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) { 2788 const char *name = parse_events__term_type_str(i); 2789 2790 if (!config_term_avail(i, NULL)) 2791 continue; 2792 if (!name) 2793 continue; 2794 if (name[0] == '<') 2795 continue; 2796 2797 if (strlen(buf) + strlen(name) + 2 >= buf_sz) 2798 return; 2799 2800 if (!first) 2801 strcat(buf, ","); 2802 else 2803 first = false; 2804 strcat(buf, name); 2805 } 2806 } 2807 2808 /* 2809 * Return a string containing the valid config terms for an event. 2810 * @additional_terms: For terms such as PMU sysfs terms. 2811 */ 2812 char *parse_events_formats_error_string(char *additional_terms) 2813 { 2814 char *str; 2815 /* "no-overwrite" is the longest name */ 2816 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR * 2817 (sizeof("no-overwrite") - 1)]; 2818 2819 config_terms_list(static_terms, sizeof(static_terms)); 2820 /* valid terms */ 2821 if (additional_terms) { 2822 if (asprintf(&str, "valid terms: %s,%s", 2823 additional_terms, static_terms) < 0) 2824 goto fail; 2825 } else { 2826 if (asprintf(&str, "valid terms: %s", static_terms) < 0) 2827 goto fail; 2828 } 2829 return str; 2830 2831 fail: 2832 return NULL; 2833 } 2834
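/*
 * Illustrative sketch (not taken from this file): a caller that rejects an
 * unknown term can combine a PMU's own term names with the static terms
 * above and hand both strings to the error machinery, e.g.
 *
 *	char *help = parse_events_formats_error_string(pmu_term_names);
 *
 *	parse_events_error__handle(err, term->err_term,
 *				   strdup("unknown term"), help);
 *
 * where "pmu_term_names" is a placeholder for a caller-built string.
 * parse_events_error__handle() takes ownership of both strings, so no
 * further cleanup is needed on this path.
 */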