// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "tp_pmu.h"
#include "asm/bug.h"
#include "ui/ui.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/tool_pmu.h"
#include "util/util.h"
#include "tracepoint.h"
#include <api/fs/tracing_path.h>

#define MAX_NAME_LEN 100

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest);
static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb);

static const char *const event_types[] = {
	[PERF_TYPE_HARDWARE]	= "hardware",
	[PERF_TYPE_SOFTWARE]	= "software",
	[PERF_TYPE_TRACEPOINT]	= "tracepoint",
	[PERF_TYPE_HW_CACHE]	= "hardware-cache",
	[PERF_TYPE_RAW]		= "raw",
	[PERF_TYPE_BREAKPOINT]	= "breakpoint",
};

const char *event_type(size_t type)
{
	if (type >= PERF_TYPE_MAX)
		return "unknown";

	return event_types[type];
}

static char *get_config_str(const struct parse_events_terms *head_terms,
			    enum parse_events__term_type type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms,
					   bool fake_pmu)
{
	struct parse_events_term *term;
	struct perf_cpu_map *cpus = NULL;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list) {
		struct perf_cpu_map *term_cpus;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_CPU)
			continue;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			term_cpus = perf_cpu_map__new_int(term->val.num);
		} else {
			struct perf_pmu *pmu = perf_pmus__find(term->val.str);

			if (pmu) {
				term_cpus = pmu->is_core && perf_cpu_map__is_empty(pmu->cpus)
					    ? cpu_map__online()
					    : perf_cpu_map__get(pmu->cpus);
			} else {
				term_cpus = perf_cpu_map__new(term->val.str);
				if (!term_cpus && fake_pmu) {
					/*
					 * Assume the PMU string makes sense on a different
					 * machine and fake a value with all online CPUs.
					 */
					term_cpus = cpu_map__online();
				}
			}
		}
		perf_cpu_map__merge(&cpus, term_cpus);
		perf_cpu_map__put(term_cpus);
	}

	return cpus;
}

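/*
 * Illustrative sketch of get_config_cpu(): a "cpu" term may hold either a
 * CPU list or a PMU name. Assuming a hybrid machine with a "cpu_atom" PMU:
 *
 *   "instructions/cpu=2/"        -> map containing CPU 2
 *   "instructions/cpu=cpu_atom/" -> the cpu_atom PMU's CPU map
 *
 * Multiple cpu terms on one event are merged into a single map.
 */
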
/**
 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
 *           matches the raw's string value. If the string value matches an
 *           event then change the term to be an event, if not then change it
 *           to be a config term. For example, "read" may be an event of the
 *           PMU or a raw hex encoding of 0xead. The fix-up is done late so the
 *           PMU of the event can be determined and we don't need to scan all
 *           PMUs ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &config_terms->terms, list) {
		u64 num;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
			continue;

		if (perf_pmu__have_event(pmu, term->val.str)) {
			zfree(&term->config);
			term->config = term->val.str;
			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->val.num = 1;
			term->no_value = true;
			continue;
		}

		zfree(&term->config);
		term->config = strdup("config");
		errno = 0;
		num = strtoull(term->val.str + 1, NULL, 16);
		assert(errno == 0);
		free(term->val.str);
		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
		term->val.num = num;
		term->no_value = false;
	}
}

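/*
 * Illustrative sketch of the fix-up above: with a PMU that has a sysfs event
 * named "read", the term in "pmu/read/" becomes a user term referencing that
 * event; with no such event, "read" is treated as the raw hex encoding
 * following the 'r', i.e. config=0xead.
 */
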
static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, struct evsel *first_wildcard_match,
	    struct perf_cpu_map *user_cpus, u64 alternate_hw_config)
{
	struct evsel *evsel;
	bool is_pmu_core;
	struct perf_cpu_map *cpus, *pmu_cpus;
	bool has_user_cpus = !perf_cpu_map__is_empty(user_cpus);

	/*
	 * Ensure the first_wildcard_match's PMU matches that of the new event
	 * being added. Otherwise try to match with another event further down
	 * the evlist.
	 */
	if (first_wildcard_match) {
		struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);

		first_wildcard_match = NULL;
		list_for_each_entry_continue(pos, list, core.node) {
			if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
				first_wildcard_match = pos;
				break;
			}
			if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
				first_wildcard_match = pos;
				break;
			}
		}
	}

	if (pmu) {
		perf_pmu__warn_invalid_formats(pmu);
		if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
			perf_pmu__warn_invalid_config(pmu, attr->config, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
			perf_pmu__warn_invalid_config(pmu, attr->config1, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
			perf_pmu__warn_invalid_config(pmu, attr->config2, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
			perf_pmu__warn_invalid_config(pmu, attr->config3, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
		}
	}
	/*
	 * If a PMU wasn't given, such as for legacy events, find now that
	 * warnings won't be generated.
	 */
	if (!pmu)
		pmu = perf_pmus__find_by_attr(attr);

	if (pmu) {
		is_pmu_core = pmu->is_core;
		pmu_cpus = perf_cpu_map__get(pmu->cpus);
		if (perf_cpu_map__is_empty(pmu_cpus)) {
			if (perf_pmu__is_tool(pmu))
				pmu_cpus = tool_pmu__cpus(attr);
			else
				pmu_cpus = cpu_map__online();
		}
	} else {
		is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
			       attr->type == PERF_TYPE_HW_CACHE);
		pmu_cpus = is_pmu_core ? cpu_map__online() : NULL;
	}

	if (has_user_cpus)
		cpus = perf_cpu_map__get(user_cpus);
	else
		cpus = perf_cpu_map__get(pmu_cpus);

	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel)
		goto out_err;

	if (name) {
		evsel->name = strdup(name);
		if (!evsel->name)
			goto out_err;
	}

	if (metric_id) {
		evsel->metric_id = strdup(metric_id);
		if (!evsel->metric_id)
			goto out_err;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.pmu_cpus = pmu_cpus;
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->core.is_pmu_core = is_pmu_core;
	evsel->pmu = pmu;
	evsel->alternate_hw_config = alternate_hw_config;
	evsel->first_wildcard_match = first_wildcard_match;

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	if (has_user_cpus)
		evsel__warn_user_requested_cpus(evsel, user_cpus);

	return evsel;
out_err:
	perf_cpu_map__put(cpus);
	perf_cpu_map__put(pmu_cpus);
	/* evsel is NULL here if evsel__new_idx() failed; don't dereference it. */
	if (evsel) {
		zfree(&evsel->name);
		zfree(&evsel->metric_id);
		free(evsel);
	}
	return NULL;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms,
		     u64 alternate_hw_config)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   alternate_hw_config) ? 0 : -ENOMEM;
}

/**
 * parse_aliases - search names for entries beginning or equalling str ignoring
 *                 case. If multiple entries in names match str then the
 *                 longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_state *parse_state);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_state *parse_state);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_state *parse_state,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}

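/*
 * Worked example (illustrative): "L1-dcache-load-misses" decodes as cache
 * type L1D (0), op LOAD (0, an alias of READ) and result MISS (1), so
 *
 *   *config = 0 | (0 << 8) | (1 << 16) = 0x10000
 *
 * "L1-dcache" alone falls back to the read op and access result.
 */
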
/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match);

static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name, int column)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get error directly from syscall errno ( > 0),
	 * or from encoded pointer's error ( < 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, column, strdup(str), strdup(help));
}

static int add_tracepoint(struct parse_events_state *parse_state,
			  struct list_head *list,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
					       !parse_state->fake_tp);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

struct add_tracepoint_multi_args {
	struct parse_events_state *parse_state;
	struct list_head *list;
	const char *sys_glob;
	const char *evt_glob;
	struct parse_events_error *err;
	struct parse_events_terms *head_config;
	YYLTYPE *loc;
	int found;
};

static int add_tracepoint_multi_event_cb(void *state, const char *sys_name, const char *evt_name)
{
	struct add_tracepoint_multi_args *args = state;
	int ret;

	if (!strglobmatch(evt_name, args->evt_glob))
		return 0;

	args->found++;
	ret = add_tracepoint(args->parse_state, args->list, sys_name, evt_name,
			     args->err, args->head_config, args->loc);

	return ret;
}

static int add_tracepoint_multi_event(struct add_tracepoint_multi_args *args, const char *sys_name)
{
	if (strpbrk(args->evt_glob, "*?") == NULL) {
		/* Not a glob. */
		args->found++;
		return add_tracepoint(args->parse_state, args->list, sys_name, args->evt_glob,
				      args->err, args->head_config, args->loc);
	}

	return tp_pmu__for_each_tp_event(sys_name, args, add_tracepoint_multi_event_cb);
}

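/*
 * Illustrative sketch: "sched:sched_*" contains a glob, so every event under
 * the "sched" system whose name matches "sched_*" (sched_switch,
 * sched_wakeup, ...) is added via the callback above; "sched:sched_switch"
 * contains no '*' or '?' and is added directly.
 */
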
static int add_tracepoint_multi_sys_cb(void *state, const char *sys_name)
{
	struct add_tracepoint_multi_args *args = state;

	if (!strglobmatch(sys_name, args->sys_glob))
		return 0;

	return add_tracepoint_multi_event(args, sys_name);
}

static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
				    struct list_head *list,
				    const char *sys_glob, const char *evt_glob,
				    struct parse_events_error *err,
				    struct parse_events_terms *head_config, YYLTYPE *loc)
{
	struct add_tracepoint_multi_args args = {
		.parse_state = parse_state,
		.list = list,
		.sys_glob = sys_glob,
		.evt_glob = evt_glob,
		.err = err,
		.head_config = head_config,
		.loc = loc,
		.found = 0,
	};
	int ret;

	if (strpbrk(sys_glob, "*?") == NULL) {
		/* Not a glob. */
		ret = add_tracepoint_multi_event(&args, sys_glob);
	} else {
		ret = tp_pmu__for_each_tp_sys(&args, add_tracepoint_multi_sys_cb);
	}
	if (args.found == 0) {
		tracepoint_error(err, ENOENT, sys_glob, evt_glob, loc->first_column);
		return -ENOENT;
	}
	return ret;
}

size_t default_breakpoint_len(void)
{
#if defined(__i386__)
	static int len;

	if (len == 0) {
		struct perf_env env = {};

		perf_env__init(&env);
		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
		perf_env__exit(&env);
	}
	return len;
#elif defined(__aarch64__)
	return 4;
#else
	return sizeof(long);
#endif
}

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
				struct list_head *list,
				u64 addr, char *type, u64 len,
				struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = default_breakpoint_len();
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state, config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);

	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
			 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

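/*
 * Illustrative sketch: "mem:0x1000:rw" breaks on reads and writes of the
 * default 4 bytes at 0x1000, while an execute breakpoint defaults to
 * default_breakpoint_len(). Repeating an access character, e.g. "rr", is
 * rejected by CHECK_SET_TYPE above.
 */
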
static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  enum parse_events__term_val_type type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					   type == PARSE_EVENTS__TERM_TYPE_NUM
					   ? strdup("expected numeric value")
					   : strdup("expected string value"),
					   NULL);
	}
	return -EINVAL;
}

static bool config_term_shrinked;

const char *parse_events__term_type_str(enum parse_events__term_type term_type)
{
	/*
	 * Update according to parse-events.l
	 */
	static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
		[PARSE_EVENTS__TERM_TYPE_USER]			= "<sysfs term>",
		[PARSE_EVENTS__TERM_TYPE_CONFIG]		= "config",
		[PARSE_EVENTS__TERM_TYPE_CONFIG1]		= "config1",
		[PARSE_EVENTS__TERM_TYPE_CONFIG2]		= "config2",
		[PARSE_EVENTS__TERM_TYPE_CONFIG3]		= "config3",
		[PARSE_EVENTS__TERM_TYPE_NAME]			= "name",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]		= "period",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]		= "freq",
		[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]	= "branch_type",
		[PARSE_EVENTS__TERM_TYPE_TIME]			= "time",
		[PARSE_EVENTS__TERM_TYPE_CALLGRAPH]		= "call-graph",
		[PARSE_EVENTS__TERM_TYPE_STACKSIZE]		= "stack-size",
		[PARSE_EVENTS__TERM_TYPE_NOINHERIT]		= "no-inherit",
		[PARSE_EVENTS__TERM_TYPE_INHERIT]		= "inherit",
		[PARSE_EVENTS__TERM_TYPE_MAX_STACK]		= "max-stack",
		[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]		= "nr",
		[PARSE_EVENTS__TERM_TYPE_OVERWRITE]		= "overwrite",
		[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]		= "no-overwrite",
		[PARSE_EVENTS__TERM_TYPE_DRV_CFG]		= "driver-config",
		[PARSE_EVENTS__TERM_TYPE_PERCORE]		= "percore",
		[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]		= "aux-output",
		[PARSE_EVENTS__TERM_TYPE_AUX_ACTION]		= "aux-action",
		[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]	= "aux-sample-size",
		[PARSE_EVENTS__TERM_TYPE_METRIC_ID]		= "metric-id",
		[PARSE_EVENTS__TERM_TYPE_RAW]			= "raw",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG] = "legacy-hardware-config",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG]	= "legacy-cache-config",
		[PARSE_EVENTS__TERM_TYPE_CPU]			= "cpu",
		[PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV]		= "ratio-to-prev",
	};
	if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
		return "unknown term";

	return config_term_names[term_type];
}

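/*
 * Illustrative mapping: the strings above are what the user writes between
 * the slashes, so "cycles/period=1000,name=cyc/" parses into a SAMPLE_PERIOD
 * term and a NAME term, while an unrecognized "foo=1" becomes a
 * PARSE_EVENTS__TERM_TYPE_USER term to be resolved against the PMU's sysfs
 * format or aliases.
 */
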
static bool
config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					   strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
		return true;
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     parse_events__term_type_str(term_type)) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_state *parse_state)
{
#define CHECK_TYPE_VAL(type)							\
do {										\
	if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_ ## type)) \
		return -EINVAL;							\
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("invalid branch sample type"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_RAW:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CPU: {
		struct perf_cpu_map *map;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
				parse_events_error__handle(parse_state->error, term->err_val,
							   strdup("too big"),
							   /*help=*/NULL);
				return -EINVAL;
			}
			break;
		}
		assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
		if (perf_pmus__find(term->val.str) != NULL)
			break;

		map = perf_cpu_map__new(term->val.str);
		if (!map && !parse_state->fake_pmu) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("not a valid PMU or CPU number"),
						   /*help=*/NULL);
			return -EINVAL;
		}
		perf_cpu_map__put(map);
		break;
	}
	case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
		CHECK_TYPE_VAL(STR);
		/* Clear errno so a stale ERANGE can't trigger the check below. */
		errno = 0;
		if (strtod(term->val.str, NULL) <= 0) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("zero or negative"),
						   NULL);
			return -EINVAL;
		}
		if (errno == ERANGE) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
	default:
		parse_events_error__handle(parse_state->error, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after basic checking so
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If availability were checked at the entry of this function, the
	 * user would see "'<sysfs term>' is not usable in 'perf stat'"
	 * whenever an invalid config term is provided for a legacy event
	 * (for example, instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, parse_state->error))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

static bool check_pmu_is_core(__u32 type, const struct parse_events_term *term,
			      struct parse_events_error *err)
{
	struct perf_pmu *pmu = NULL;

	/* Avoid loading all PMUs with perf_pmus__find_by_type, just scan the core ones. */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (pmu->type == type)
			return true;
	}
	parse_events_error__handle(err, term->err_val,
				   strdup("needs a core PMU"),
				   NULL);
	return false;
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_state *parse_state)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG) {
		if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM))
			return -EINVAL;
		if (term->val.num >= PERF_COUNT_HW_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		if (!check_pmu_is_core(attr->type, term, parse_state->error))
			return -EINVAL;
		attr->config = term->val.num;
		if (perf_pmus__supports_extended_type())
			attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT;
		attr->type = PERF_TYPE_HARDWARE;
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG) {
		int cache_type, cache_op, cache_result;

		if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM))
			return -EINVAL;
		cache_type = term->val.num & 0xFF;
		cache_op = (term->val.num >> 8) & 0xFF;
		cache_result = (term->val.num >> 16) & 0xFF;
		if ((term->val.num & ~0xFFFFFF) ||
		    cache_type >= PERF_COUNT_HW_CACHE_MAX ||
		    cache_op >= PERF_COUNT_HW_CACHE_OP_MAX ||
		    cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		if (!check_pmu_is_core(attr->type, term, parse_state->error))
			return -EINVAL;
		attr->config = term->val.num;
		if (perf_pmus__supports_extended_type())
			attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT;
		attr->type = PERF_TYPE_HW_CACHE;
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, parse_state);
}

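/*
 * Illustrative sketch of the extended-type encoding used above: on a hybrid
 * system where the "cpu_atom" PMU has, say, type 10, "cpu_atom/cycles/"
 * becomes
 *
 *   attr.type   = PERF_TYPE_HARDWARE;
 *   attr.config = PERF_COUNT_HW_CPU_CYCLES | ((__u64)10 << PERF_PMU_TYPE_SHIFT);
 *
 * so the kernel can route the legacy event to the right core PMU.
 */
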
static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_state *parse_state)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, parse_state);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_CPU:
	case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
	default:
		parse_events_error__handle(parse_state->error, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   strdup("valid terms: call-graph,stack-size\n"));
		return -EINVAL;
	}

	return 0;
}

static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_state *parse_state,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &head->terms, list)
		if (config_term(attr, term, parse_state))
			return -EINVAL;

	return 0;
}

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
			ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
			ADD_CONFIG_TERM_STR(RATIO_TO_PREV, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_USER:
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}
	return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

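/*
 * Illustrative sketch: with a hypothetical PMU format "event:0-7", the terms
 * in "pmu/event=0x3c,period=1000/" set cfg_chg to 0xff (only the format bits
 * the user touched), while an explicit "config=..." term marks all 64 bits
 * of attr->config as user-changed.
 */
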
int parse_events_add_tracepoint(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;

	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, parse_state, config_term_tracepoint))
			return -EINVAL;
	}

	return add_tracepoint_multi_sys(parse_state, list, sys, event,
					err, head_config, loc);
}

static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				      struct list_head *list,
				      struct perf_pmu *pmu, u32 type, u32 extended_type,
				      u64 config, const struct parse_events_terms *head_config,
				      struct evsel *first_wildcard_match)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	struct perf_cpu_map *cpus;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state, config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	cpus = get_config_cpu(head_config, parse_state->fake_pmu);
	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
			  metric_id, pmu, &config_terms, first_wildcard_match,
			  cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
	perf_cpu_map__put(cpus);
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     const struct parse_events_terms *head_config,
			     bool wildcard)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;

	/* Wildcards on numeric values are only supported by core PMUs. */
	if (wildcard && perf_pmus__supports_extended_type()) {
		struct evsel *first_wildcard_match = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			int ret;

			found_supported = true;
			if (parse_events__filter_pmu(parse_state, pmu))
				continue;

			ret = __parse_events_add_numeric(parse_state, list, pmu,
							 type, pmu->type,
							 config, head_config,
							 first_wildcard_match);
			if (ret)
				return ret;
			if (first_wildcard_match == NULL)
				first_wildcard_match =
					container_of(list->prev, struct evsel, core.node);
		}
		if (found_supported)
			return 0;
	}
	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
					  type, /*extended_type=*/0, config, head_config,
					  /*first_wildcard_match=*/NULL);
}

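/*
 * Illustrative sketch: on a hybrid machine with cpu_core and cpu_atom PMUs,
 * the wildcard path above turns a bare "cycles" into one event per core PMU,
 * roughly equivalent to "cpu_core/cycles/,cpu_atom/cycles/", with the first
 * added event remembered as first_wildcard_match so later events can be
 * paired with it.
 */
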
static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match)
{
	u64 alternate_hw_config = PERF_COUNT_HW_MAX;
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);
	struct parse_events_terms parsed_terms;
	bool alias_rewrote_terms = false;
	struct perf_cpu_map *term_cpu = NULL;

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		if (pmu->selectable && const_parsed_terms &&
		    list_empty(&const_parsed_terms->terms)) {
			strbuf_addf(&sb, "%s//", pmu->name);
		} else {
			strbuf_addf(&sb, "%s/", pmu->name);
			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
			strbuf_addch(&sb, '/');
		}
		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
		strbuf_release(&sb);
	}

	memset(&attr, 0, sizeof(attr));
	if (pmu->perf_event_attr_init_default)
		pmu->perf_event_attr_init_default(pmu, &attr);

	attr.type = pmu->type;

	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, first_wildcard_match,
				    /*cpu_list=*/NULL, alternate_hw_config);
		return evsel ? 0 : -ENOMEM;
	}

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}
	fix_raw(&parsed_terms, pmu);

	/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
	if (config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	/* Look for event names in the terms and rewrite into format based terms. */
	if (perf_pmu__check_alias(pmu, &parsed_terms,
				  &info, &alias_rewrote_terms,
				  &alternate_hw_config, err)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		parse_events_terms__to_strbuf(&parsed_terms, &sb);
		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
		strbuf_release(&sb);
	}

	/* Configure attr/terms again if an alias was expanded. */
	if (alias_rewrote_terms &&
	    config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (get_config_terms(&parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->perf_event_attr_init_default &&
	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/* Skip configuring hard coded terms that were applied by config_attr. */
	if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
			     parse_state->error)) {
		free_config_terms(&config_terms);
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	term_cpu = get_config_cpu(&parsed_terms, parse_state->fake_pmu);
	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(&parsed_terms),
			    get_config_metric_id(&parsed_terms), pmu,
			    &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
	perf_cpu_map__put(term_cpu);
	if (!evsel) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->percore = config_term_percore(&evsel->config_terms);

	parse_events_terms__exit(&parsed_terms);
	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	evsel->retirement_latency.mean = info.retirement_latency_mean;
	evsel->retirement_latency.min = info.retirement_latency_min;
	evsel->retirement_latency.max = info.retirement_latency_max;

	return 0;
}

int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       const char *event_name,
			       const struct parse_events_terms *const_parsed_terms,
			       struct list_head **listp, void *loc_)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct perf_pmu *pmu = NULL;
	YYLTYPE *loc = loc_;
	int ok = 0;
	const char *config;
	struct parse_events_terms parsed_terms;
	struct evsel *first_wildcard_match = NULL;

	*listp = NULL;

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}

	config = strdup(event_name);
	if (!config)
		goto out_err;

	if (parse_events_term__num(&term,
				   PARSE_EVENTS__TERM_TYPE_USER,
				   config, /*num=*/1, /*novalue=*/true,
				   loc, /*loc_val=*/NULL) < 0) {
		zfree(&config);
		goto out_err;
	}
	list_add_tail(&term->list, &parsed_terms.terms);

	/* Add it for all PMUs that support the alias */
	list = malloc(sizeof(struct list_head));
	if (!list)
		goto out_err;

	INIT_LIST_HEAD(list);

	while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) {

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!perf_pmu__have_event(pmu, event_name))
			continue;

		if (!parse_events_add_pmu(parse_state, list, pmu,
					  &parsed_terms, first_wildcard_match)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
		if (first_wildcard_match == NULL)
			first_wildcard_match = container_of(list->prev, struct evsel, core.node);
	}

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
					  first_wildcard_match)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
	}

out_err:
	parse_events_terms__exit(&parsed_terms);
	if (ok)
		*listp = list;
	else
		free(list);

	return ok ? 0 : -1;
}

int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
					  const char *event_or_pmu,
					  const struct parse_events_terms *const_parsed_terms,
					  struct list_head **listp,
					  void *loc_)
{
	YYLTYPE *loc = loc_;
	struct perf_pmu *pmu;
	int ok = 0;
	char *help;
	struct evsel *first_wildcard_match = NULL;

	*listp = malloc(sizeof(**listp));
	if (!*listp)
		return -ENOMEM;

	INIT_LIST_HEAD(*listp);

	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
	pmu = perf_pmus__find(event_or_pmu);
	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
					 first_wildcard_match))
		return 0;

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
					  const_parsed_terms,
					  first_wildcard_match))
			return 0;
	}

	pmu = NULL;
	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
	while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!parse_events_add_pmu(parse_state, *listp, pmu,
					  const_parsed_terms,
					  first_wildcard_match)) {
			ok++;
			parse_state->wild_card_pmus = true;
		}
		if (first_wildcard_match == NULL) {
			first_wildcard_match =
				container_of((*listp)->prev, struct evsel, core.node);
		}
	}
	if (ok)
		return 0;

	/* Failure to add, assume event_or_pmu is an event name. */
	zfree(listp);
	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu,
					const_parsed_terms, listp, loc))
		return 0;

	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
		help = NULL;
	parse_events_error__handle(parse_state->error, loc->first_column,
				   strdup("Bad event or PMU"),
				   help);
	zfree(listp);
	return -EINVAL;
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	leader = list_first_entry(list, struct evsel, core.node);
	__perf_evlist__set_leader(list, &leader->core);
	zfree(&leader->group_name);
	leader->group_name = name;
}

static int parse_events__modifier_list(struct parse_events_state *parse_state,
				       YYLTYPE *loc,
				       struct list_head *list,
				       struct parse_events_modifier mod,
				       bool group)
{
	struct evsel *evsel;

	if (!group && mod.weak) {
		parse_events_error__handle(parse_state->error, loc->first_column,
					   strdup("Weak modifier is for use with groups"), NULL);
		return -EINVAL;
	}

	__evlist__for_each_entry(list, evsel) {
		/* Translate modifiers into the equivalent evsel excludes. */
		int eu = group ? evsel->core.attr.exclude_user : 0;
		int ek = group ? evsel->core.attr.exclude_kernel : 0;
		int eh = group ? evsel->core.attr.exclude_hv : 0;
		int eH = group ? evsel->core.attr.exclude_host : 0;
		int eG = group ? evsel->core.attr.exclude_guest : 0;
		int exclude = eu | ek | eh;
		int exclude_GH = eG | eH;

		if (mod.user) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eu = 0;
		}
		if (mod.kernel) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		}
		if (mod.hypervisor) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		}
		if (mod.guest) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		}
		if (mod.host) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		}
		if (!exclude_GH && exclude_GH_default) {
			if (perf_host)
				eG = 1;
			else if (perf_guest)
				eH = 1;
		}

		evsel->core.attr.exclude_user = eu;
		evsel->core.attr.exclude_kernel = ek;
		evsel->core.attr.exclude_hv = eh;
		evsel->core.attr.exclude_host = eH;
		evsel->core.attr.exclude_guest = eG;
		evsel->exclude_GH = exclude_GH;

		/* Simple modifiers copied to the evsel. */
		if (mod.precise) {
			u8 precise = evsel->core.attr.precise_ip + mod.precise;
			/*
			 * precise ip:
			 *
			 *  0 - SAMPLE_IP can have arbitrary skid
			 *  1 - SAMPLE_IP must have constant skid
			 *  2 - SAMPLE_IP requested to have 0 skid
			 *  3 - SAMPLE_IP must have 0 skid
			 *
			 * See also PERF_RECORD_MISC_EXACT_IP
			 */
			if (precise > 3) {
				char *help;

				if (asprintf(&help,
					     "Maximum combined precise value is 3, adding precision to \"%s\"",
					     evsel__name(evsel)) > 0) {
					parse_events_error__handle(parse_state->error,
								   loc->first_column,
								   help, NULL);
				}
				return -EINVAL;
			}
			evsel->core.attr.precise_ip = precise;
		}
		if (mod.precise_max)
			evsel->precise_max = 1;
		if (mod.non_idle)
			evsel->core.attr.exclude_idle = 1;
		if (mod.sample_read)
			evsel->sample_read = 1;
		if (mod.pinned && evsel__is_group_leader(evsel))
			evsel->core.attr.pinned = 1;
		if (mod.exclusive && evsel__is_group_leader(evsel))
			evsel->core.attr.exclusive = 1;
		if (mod.weak)
			evsel->weak_group = true;
		if (mod.bpf)
			evsel->bpf_counter = true;
		if (mod.retire_lat)
			evsel->retire_lat = true;
		if (mod.dont_regroup)
			evsel->dont_regroup = true;
	}
	return 0;
}

int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}

int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}

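/*
 * Illustrative sketch of the modifier translation: "cycles:u" sets
 * exclude_kernel and exclude_hv but clears exclude_user, so only user space
 * is counted; "cycles:pp" adds 2 to precise_ip (0 skid requested); anything
 * that would push the combined precise level past 3 is rejected above.
 */
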
int parse_events__set_default_name(struct list_head *list, char *name)
{
	struct evsel *evsel;
	bool used_name = false;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name) {
			evsel->name = used_name ? strdup(name) : name;
			used_name = true;
			if (!evsel->name)
				return -ENOMEM;
		}
	}
	if (!used_name)
		free(name);
	return 0;
}

static int parse_events__scanner(const char *str,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	buffer = parse_events__scan_string(str, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	parse_events__flush_buffer(buffer, scanner);
	parse_events__delete_buffer(buffer, scanner);
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * parse event config string, return a list of event terms.
 */
int parse_events_terms(struct parse_events_terms *terms, const char *str)
{
	struct parse_events_state parse_state = {
		.terms = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, &parse_state);
	if (!ret)
		list_splice(&parse_state.terms->terms, &terms->terms);

	zfree(&parse_state.terms);
	return ret;
}

static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used as
		 * the list isn't yet sorted putting evsel's in the same group
		 * together.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Record computed name. */
	evsel->group_pmu_name = strdup(group_pmu_name);
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}

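/*
 * Illustrative sketch: in a group such as "{uncore_imc_0/cas_count_read/,
 * dummy}" (uncore PMU names vary by machine), the software "dummy" event
 * inherits "uncore_imc_0" as its group PMU name so sorting keeps it inside
 * the group rather than splitting it out.
 */
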
static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used as
		 * the list isn't yet sorted to put evsels in the same group
		 * together.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Record computed name. */
	evsel->group_pmu_name = strdup(group_pmu_name);
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}

__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}

static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *force_grouped_idx = _fg_idx;
	int lhs_sort_idx, rhs_sort_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;

	/*
	 * Get the indexes of the 2 events to sort. If the events are
	 * in groups then the leader's index is used otherwise the
	 * event's index is used. An index may be forced for events that
	 * must be in the same group, namely Intel topdown events.
	 */
	if (lhs->dont_regroup) {
		lhs_sort_idx = lhs_core->idx;
	} else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
		lhs_sort_idx = *force_grouped_idx;
	} else {
		bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;

		lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
	}
	if (rhs->dont_regroup) {
		rhs_sort_idx = rhs_core->idx;
	} else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
		rhs_sort_idx = *force_grouped_idx;
	} else {
		bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;

		rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
	}

	/* If the indices differ then respect the insertion order. */
	if (lhs_sort_idx != rhs_sort_idx)
		return lhs_sort_idx - rhs_sort_idx;

	/*
	 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should
	 * be in the same group. Events in the same group need to be ordered by
	 * their grouping PMU name as the group will be broken to ensure only
	 * events on the same PMU are programmed together.
	 *
	 * With forcing, lhs_sort_idx == rhs_sort_idx shows that one or both
	 * events are being forced to be at force_group_index. If only one event
	 * is being forced then the other event is the group leader of the group
	 * we're trying to force the event into. Ensure for the force grouped
	 * case that the PMU name ordering is also respected.
	 */
	lhs_pmu_name = lhs->group_pmu_name;
	rhs_pmu_name = rhs->group_pmu_name;
	ret = strcmp(lhs_pmu_name, rhs_pmu_name);
	if (ret)
		return ret;

	/*
	 * Architecture specific sorting: by default, sort events in the same
	 * group with the same PMU by their insertion index. On Intel, topdown
	 * constraints must be adhered to - slots first, etc.
	 */
	return arch_evlist__cmp(lhs, rhs);
}

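/*
 * Illustrative example (not part of the original source): for the command
 * line "faults,{cycles,instructions}", "faults" keeps sort index 0 while
 * both group members share their leader's index, so evlist__cmp() leaves
 * "faults" first and orders the group members by group PMU name and then
 * by arch_evlist__cmp(). Grouped events whose group PMU names differ
 * compare unequal here and are split into separate groups later.
 */
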
int __weak arch_evlist__add_required_events(struct list_head *list __always_unused)
{
	return 0;
}

static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, force_grouped_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false;
	int orig_num_leaders = 0, num_leaders = 0;
	int ret;
	struct evsel *force_grouped_leader = NULL;
	bool last_event_was_forced_leader = false;

	/* On x86 topdown metrics events require a slots event. */
	ret = arch_evlist__add_required_events(list);
	if (ret)
		return ret;

	/*
	 * Compute index to insert ungrouped events at. Place them where the
	 * first ungrouped event appears.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		ret = evsel__compute_group_pmu_name(pos, list);
		if (ret)
			return ret;

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect when
		 * the user order is modified.
		 */
		pos->core.idx = idx++;

		/*
		 * Remember an index to sort all forced grouped events
		 * together to. Use the group leader as some events
		 * must appear first within the group.
		 */
		if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
			force_grouped_idx = pos_leader->core.idx;
	}

	/* Sort events. */
	list_sort(&force_grouped_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = pos->group_pmu_name;
		const char *cur_leader_pmu_name;
		bool pos_force_grouped = force_grouped_idx != -1 && !pos->dont_regroup &&
					 arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader respecting the given groupings and that
		 * groups can't span PMUs.
		 */
		if (!cur_leader || pos->dont_regroup) {
			cur_leader = pos->dont_regroup ? pos_leader : pos;
			cur_leaders_grp = &cur_leader->core;
			if (pos_force_grouped)
				force_grouped_leader = pos;
		}
		cur_leader_pmu_name = cur_leader->group_pmu_name;
		if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* PMU changed so the group/leader must change. */
			cur_leader = pos;
			cur_leaders_grp = pos->core.leader;
			if (pos_force_grouped && force_grouped_leader == NULL)
				force_grouped_leader = pos;
		} else if (cur_leaders_grp != pos->core.leader) {
			bool split_even_if_last_leader_was_forced = true;

			/*
			 * Event is for a different group. If the last event was
			 * the forced group leader then subsequent group events
			 * and forced events should be in the same group. If
			 * there are no other forced group events then the
			 * forced group leader wasn't really being forced into a
			 * group, it just set arch_evsel__must_be_in_group, and
			 * we don't want the group to split here.
			 */
			if (force_grouped_idx != -1 && last_event_was_forced_leader) {
				struct evsel *pos2 = pos;
				/*
				 * Search the whole list as the group leaders
				 * aren't currently valid.
				 */
				list_for_each_entry_continue(pos2, list, core.node) {
					if (pos->core.leader == pos2->core.leader &&
					    arch_evsel__must_be_in_group(pos2)) {
						split_even_if_last_leader_was_forced = false;
						break;
					}
				}
			}
			if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
				if (pos_force_grouped) {
					if (force_grouped_leader) {
						cur_leader = force_grouped_leader;
						cur_leaders_grp = force_grouped_leader->core.leader;
					} else {
						cur_leader = force_grouped_leader = pos;
						cur_leaders_grp = &pos->core;
					}
				} else {
					cur_leader = pos;
					cur_leaders_grp = pos->core.leader;
				}
			}
		}
		if (pos_leader != cur_leader) {
			/* The leader changed so update it. */
			evsel__set_leader(pos, cur_leader);
		}
		last_event_was_forced_leader = (force_grouped_leader == pos);
	}
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}

int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
		   struct parse_events_error *err, bool fake_pmu,
		   bool warn_if_reordered, bool fake_tp)
{
	struct parse_events_state parse_state = {
		.list	    = LIST_HEAD_INIT(parse_state.list),
		.idx	    = evlist->core.nr_entries,
		.error	    = err,
		.stoken	    = PE_START_EVENTS,
		.fake_pmu   = fake_pmu,
		.fake_tp    = fake_tp,
		.pmu_filter = pmu_filter,
		.match_legacy_cache_terms = true,
	};
	int ret, ret2;

	ret = parse_events__scanner(str, &parse_state);

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
	if (ret2 < 0)
		return ret ?: ret2;

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
		pr_warning("WARNING: events were regrouped to match PMUs\n");

		if (verbose > 0) {
			struct strbuf sb = STRBUF_INIT;

			evlist__uniquify_evsel_names(evlist, &stat_config);
			evlist__format_evsels(evlist, &sb, 2048);
			pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
			strbuf_release(&sb);
		}
	}
	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are 2 users - builtin-record and builtin-test objects.
	 * Both call evlist__delete in case of error, so we don't
	 * need to bother.
	 */
	return ret;
}

int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	if (ret && verbose > 0)
		parse_events_error__print(&err, str);
	parse_events_error__exit(&err);
	return ret;
}

struct parse_events_error_entry {
	/** @list: The list the error is part of. */
	struct list_head list;
	/** @idx: index in the parsed string */
	int idx;
	/** @str: string to display at the index */
	char *str;
	/** @help: optional help string */
	char *help;
};

void parse_events_error__init(struct parse_events_error *err)
{
	INIT_LIST_HEAD(&err->list);
}

void parse_events_error__exit(struct parse_events_error *err)
{
	struct parse_events_error_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &err->list, list) {
		zfree(&pos->str);
		zfree(&pos->help);
		list_del_init(&pos->list);
		free(pos);
	}
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	struct parse_events_error_entry *entry;

	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;

	entry = zalloc(sizeof(*entry));
	if (!entry) {
		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
			str, help ?: "<no help>");
		goto out_free;
	}
	entry->idx = idx;
	entry->str = str;
	entry->help = help;
	list_add(&entry->list, &err->list);
	return;
out_free:
	free(str);
	free(help);
}

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;

	if (err_str) {
		/* -2 for extra '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent, we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}

void parse_events_error__print(const struct parse_events_error *err,
			       const char *event)
{
	struct parse_events_error_entry *pos;
	bool first = true;

	list_for_each_entry(pos, &err->list, list) {
		if (!first)
			fputs("\n", stderr);
		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
		first = false;
	}
}

/*
 * In the list of errors err, do any of the error strings (str) contain the
 * given needle string?
 */
bool parse_events_error__contains(const struct parse_events_error *err,
				  const char *needle)
{
	struct parse_events_error_entry *pos;

	list_for_each_entry(pos, &err->list, list) {
		if (strstr(pos->str, needle) != NULL)
			return true;
	}
	return false;
}
#undef MAX_WIDTH

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct parse_events_option_args *args = opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
			     /*fake_pmu=*/false, /*warn_if_reordered=*/true,
			     /*fake_tp=*/false);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct parse_events_option_args *args = opt->value;
	int ret;

	if (*args->evlistp == NULL) {
		*args->evlistp = evlist__new();

		if (*args->evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}
	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*args->evlistp);
		*args->evlistp = NULL;
	}

	return ret;
}

static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return when list_empty, give func a chance to report
	 * error when it found last == NULL.
	 *
	 * So no need to WARN here, let *func do this.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		if (!last)
			return 0;

		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}

/* Will a tracepoint filter work for str or should a BPF filter be used? */
static bool is_possible_tp_filter(const char *str)
{
	return strstr(str, "uid") == NULL;
}

static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	pmu = evsel__find_pmu(evsel);
	if (pmu) {
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);
	}
	if (!nr_addr_filters)
		return perf_bpf_filter__parse(&evsel->bpf_filters, str);

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}

int parse_uid_filter(struct evlist *evlist, uid_t uid)
{
	struct option opt = {
		.value = &evlist,
	};
	char buf[128];
	int ret;

	snprintf(buf, sizeof(buf), "uid == %d", uid);
	ret = parse_filter(&opt, buf, /*unset=*/0);
	if (ret) {
		if (use_browser >= 1) {
			/*
			 * Use ui__warning so a pop up appears above the
			 * underlying BPF error message.
			 */
			ui__warning("Failed to add UID filtering that uses BPF filtering.\n");
		} else {
			fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n");
		}
	}
	return ret;
}

static int add_exclude_perf_filter(struct evsel *evsel,
				   const void *arg __maybe_unused)
{
	char new_filter[64];

	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"--exclude-perf option should follow a -e tracepoint option\n");
		return -1;
	}

	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());

	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}

int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

static int new_term(struct parse_events_term **_term,
		    struct parse_events_term *temp,
		    char *str, u64 num)
{
	struct parse_events_term *term;

	term = malloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	*term = *temp;
	INIT_LIST_HEAD(&term->list);
	term->weak = false;

	switch (term->type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}

int parse_events_term__num(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   const char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config	   = config ?: strdup(parse_events__term_type_str(type_term)),
		.no_value  = no_value,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, /*str=*/NULL, num);
}

int parse_events_term__str(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config	   = config,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, /*num=*/0);
}

int parse_events_term__term(struct parse_events_term **term,
			    enum parse_events__term_type term_lhs,
			    enum parse_events__term_type term_rhs,
			    void *loc_term, void *loc_val)
{
	return parse_events_term__str(term, term_lhs, NULL,
				      strdup(parse_events__term_type_str(term_rhs)),
				      loc_term, loc_val);
}

int parse_events_term__clone(struct parse_events_term **new,
			     const struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = *term;

	temp.used = false;
	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, /*str=*/NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str) {
		zfree(&temp.config);
		return -ENOMEM;
	}
	return new_term(new, &temp, str, /*num=*/0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &src->terms, list) {
		struct parse_events_term *n;
		int ret;

		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;

		list_add_tail(&n->list, &dest->terms);
	}
	return 0;
}

void parse_events_terms__init(struct parse_events_terms *terms)
{
	INIT_LIST_HEAD(&terms->terms);
}

void parse_events_terms__exit(struct parse_events_terms *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, &terms->terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct parse_events_terms *terms)
{
	if (!terms)
		return;
	parse_events_terms__exit(terms);
	free(terms);
}

static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
	struct parse_events_term *term;
	bool first = true;

	if (!terms)
		return 0;

	list_for_each_entry(term, &terms->terms, list) {
		int ret;

		if (!first) {
			ret = strbuf_addch(sb, ',');
			if (ret < 0)
				return ret;
		}
		first = false;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->no_value) {
				assert(term->val.num == 1);
				ret = strbuf_addf(sb, "%s", term->config);
			} else {
				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
			}
		} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
			if (term->config) {
				ret = strbuf_addf(sb, "%s=", term->config);
				if (ret < 0)
					return ret;
			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
				ret = strbuf_addf(sb, "%s=",
						  parse_events__term_type_str(term->type_term));
				if (ret < 0)
					return ret;
			}
			assert(!term->no_value);
			ret = strbuf_addf(sb, "%s", term->val.str);
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}

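/*
 * Illustrative example (not part of the original source): a terms list
 * built from "config=0x10,period=1000,name=foo" round-trips through
 * parse_events_terms__to_strbuf() as "config=0x10,period=0x3e8,name=foo",
 * since numeric values are re-emitted in hex via %#PRIx64 while string
 * terms keep their literal value.
 */
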
static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = parse_events__term_type_str(i);

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string containing the valid config terms of an event.
 * @additional_terms: For terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}

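/*
 * Illustrative example (not part of the original source): called with
 * additional_terms of "event,umask", the returned string resembles
 * "valid terms: event,umask,config,config1,config2,name,period,...",
 * suitable as the help argument to parse_events_error__handle().
 */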