1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/hw_breakpoint.h> 3 #include <linux/err.h> 4 #include <linux/list_sort.h> 5 #include <linux/zalloc.h> 6 #include <dirent.h> 7 #include <errno.h> 8 #include <sys/ioctl.h> 9 #include <sys/param.h> 10 #include "cpumap.h" 11 #include "term.h" 12 #include "env.h" 13 #include "evlist.h" 14 #include "evsel.h" 15 #include <subcmd/parse-options.h> 16 #include "parse-events.h" 17 #include "string2.h" 18 #include "strbuf.h" 19 #include "debug.h" 20 #include <perf/cpumap.h> 21 #include <util/parse-events-bison.h> 22 #include <util/parse-events-flex.h> 23 #include "pmu.h" 24 #include "pmus.h" 25 #include "tp_pmu.h" 26 #include "asm/bug.h" 27 #include "ui/ui.h" 28 #include "util/parse-branch-options.h" 29 #include "util/evsel_config.h" 30 #include "util/event.h" 31 #include "util/bpf-filter.h" 32 #include "util/stat.h" 33 #include "util/util.h" 34 #include "tracepoint.h" 35 #include <api/fs/tracing_path.h> 36 37 #define MAX_NAME_LEN 100 38 39 static int get_config_terms(const struct parse_events_terms *head_config, 40 struct list_head *head_terms); 41 static int parse_events_terms__copy(const struct parse_events_terms *src, 42 struct parse_events_terms *dest); 43 static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb); 44 45 static const char *const event_types[] = { 46 [PERF_TYPE_HARDWARE] = "hardware", 47 [PERF_TYPE_SOFTWARE] = "software", 48 [PERF_TYPE_TRACEPOINT] = "tracepoint", 49 [PERF_TYPE_HW_CACHE] = "hardware-cache", 50 [PERF_TYPE_RAW] = "raw", 51 [PERF_TYPE_BREAKPOINT] = "breakpoint", 52 }; 53 54 const char *event_type(size_t type) 55 { 56 if (type >= PERF_TYPE_MAX) 57 return "unknown"; 58 59 return event_types[type]; 60 } 61 62 static char *get_config_str(const struct parse_events_terms *head_terms, 63 enum parse_events__term_type type_term) 64 { 65 struct parse_events_term *term; 66 67 if (!head_terms) 68 return NULL; 69 70 list_for_each_entry(term, &head_terms->terms, list) 71 if (term->type_term == type_term) 72 return term->val.str; 73 74 return NULL; 75 } 76 77 static char *get_config_metric_id(const struct parse_events_terms *head_terms) 78 { 79 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID); 80 } 81 82 static char *get_config_name(const struct parse_events_terms *head_terms) 83 { 84 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME); 85 } 86 87 static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms, 88 bool fake_pmu) 89 { 90 struct parse_events_term *term; 91 struct perf_cpu_map *cpus = NULL; 92 93 if (!head_terms) 94 return NULL; 95 96 list_for_each_entry(term, &head_terms->terms, list) { 97 struct perf_cpu_map *term_cpus; 98 99 if (term->type_term != PARSE_EVENTS__TERM_TYPE_CPU) 100 continue; 101 102 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { 103 term_cpus = perf_cpu_map__new_int(term->val.num); 104 } else { 105 struct perf_pmu *pmu = perf_pmus__find(term->val.str); 106 107 if (pmu) { 108 term_cpus = pmu->is_core && perf_cpu_map__is_empty(pmu->cpus) 109 ? cpu_map__online() 110 : perf_cpu_map__get(pmu->cpus); 111 } else { 112 term_cpus = perf_cpu_map__new(term->val.str); 113 if (!term_cpus && fake_pmu) { 114 /* 115 * Assume the PMU string makes sense on a different 116 * machine and fake a value with all online CPUs. 
117 */ 118 term_cpus = cpu_map__online(); 119 } 120 } 121 } 122 perf_cpu_map__merge(&cpus, term_cpus); 123 perf_cpu_map__put(term_cpus); 124 } 125 126 return cpus; 127 } 128 129 /** 130 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that 131 * matches the raw's string value. If the string value matches an 132 * event then change the term to be an event, if not then change it to 133 * be a config term. For example, "read" may be an event of the PMU or 134 * a raw hex encoding of 0xead. The fix-up is done late so the PMU of 135 * the event can be determined and we don't need to scan all PMUs 136 * ahead-of-time. 137 * @config_terms: the list of terms that may contain a raw term. 138 * @pmu: the PMU to scan for events from. 139 */ 140 static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu) 141 { 142 struct parse_events_term *term; 143 144 list_for_each_entry(term, &config_terms->terms, list) { 145 u64 num; 146 147 if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW) 148 continue; 149 150 if (perf_pmu__have_event(pmu, term->val.str)) { 151 zfree(&term->config); 152 term->config = term->val.str; 153 term->type_val = PARSE_EVENTS__TERM_TYPE_NUM; 154 term->type_term = PARSE_EVENTS__TERM_TYPE_USER; 155 term->val.num = 1; 156 term->no_value = true; 157 continue; 158 } 159 160 zfree(&term->config); 161 term->config = strdup("config"); 162 errno = 0; 163 num = strtoull(term->val.str + 1, NULL, 16); 164 assert(errno == 0); 165 free(term->val.str); 166 term->type_val = PARSE_EVENTS__TERM_TYPE_NUM; 167 term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG; 168 term->val.num = num; 169 term->no_value = false; 170 } 171 } 172 173 static struct evsel * 174 __add_event(struct list_head *list, int *idx, 175 struct perf_event_attr *attr, 176 bool init_attr, 177 const char *name, const char *metric_id, struct perf_pmu *pmu, 178 struct list_head *config_terms, struct evsel *first_wildcard_match, 179 struct perf_cpu_map *user_cpus, u64 alternate_hw_config) 180 { 181 struct evsel *evsel; 182 bool is_pmu_core; 183 struct perf_cpu_map *cpus, *pmu_cpus; 184 bool has_user_cpus = !perf_cpu_map__is_empty(user_cpus); 185 186 /* 187 * Ensure the first_wildcard_match's PMU matches that of the new event 188 * being added. Otherwise try to match with another event further down 189 * the evlist. 
190 */ 191 if (first_wildcard_match) { 192 struct evsel *pos = list_prev_entry(first_wildcard_match, core.node); 193 194 first_wildcard_match = NULL; 195 list_for_each_entry_continue(pos, list, core.node) { 196 if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) { 197 first_wildcard_match = pos; 198 break; 199 } 200 if (pos->pmu->is_core && (!pmu || pmu->is_core)) { 201 first_wildcard_match = pos; 202 break; 203 } 204 } 205 } 206 207 if (pmu) { 208 perf_pmu__warn_invalid_formats(pmu); 209 if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) { 210 perf_pmu__warn_invalid_config(pmu, attr->config, name, 211 PERF_PMU_FORMAT_VALUE_CONFIG, "config"); 212 perf_pmu__warn_invalid_config(pmu, attr->config1, name, 213 PERF_PMU_FORMAT_VALUE_CONFIG1, "config1"); 214 perf_pmu__warn_invalid_config(pmu, attr->config2, name, 215 PERF_PMU_FORMAT_VALUE_CONFIG2, "config2"); 216 perf_pmu__warn_invalid_config(pmu, attr->config3, name, 217 PERF_PMU_FORMAT_VALUE_CONFIG3, "config3"); 218 perf_pmu__warn_invalid_config(pmu, attr->config4, name, 219 PERF_PMU_FORMAT_VALUE_CONFIG4, "config4"); 220 } 221 } 222 /* 223 * If a PMU wasn't given, such as for legacy events, find now that 224 * warnings won't be generated. 225 */ 226 if (!pmu) 227 pmu = perf_pmus__find_by_attr(attr); 228 229 if (pmu) { 230 is_pmu_core = pmu->is_core; 231 pmu_cpus = perf_cpu_map__get(pmu->cpus); 232 if (perf_cpu_map__is_empty(pmu_cpus)) 233 pmu_cpus = cpu_map__online(); 234 } else { 235 is_pmu_core = (attr->type == PERF_TYPE_HARDWARE || 236 attr->type == PERF_TYPE_HW_CACHE); 237 pmu_cpus = is_pmu_core ? cpu_map__online() : NULL; 238 } 239 240 if (has_user_cpus) 241 cpus = perf_cpu_map__get(user_cpus); 242 else 243 cpus = perf_cpu_map__get(pmu_cpus); 244 245 if (init_attr) 246 event_attr_init(attr); 247 248 evsel = evsel__new_idx(attr, *idx); 249 if (!evsel) { 250 perf_cpu_map__put(cpus); 251 perf_cpu_map__put(pmu_cpus); 252 return NULL; 253 } 254 255 if (name) { 256 evsel->name = strdup(name); 257 if (!evsel->name) 258 goto out_err; 259 } 260 261 if (metric_id) { 262 evsel->metric_id = strdup(metric_id); 263 if (!evsel->metric_id) 264 goto out_err; 265 } 266 267 (*idx)++; 268 evsel->core.cpus = cpus; 269 evsel->core.pmu_cpus = pmu_cpus; 270 evsel->core.requires_cpu = pmu ? 
					pmu->is_uncore : false;
	evsel->core.is_pmu_core = is_pmu_core;
	evsel->core.reads_only_on_cpu_idx0 = perf_pmu__reads_only_on_cpu_idx0(attr);
	evsel->pmu = pmu;
	evsel->alternate_hw_config = alternate_hw_config;
	evsel->first_wildcard_match = first_wildcard_match;

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	if (has_user_cpus)
		evsel__warn_user_requested_cpus(evsel, user_cpus);

	return evsel;
out_err:
	perf_cpu_map__put(cpus);
	perf_cpu_map__put(pmu_cpus);
	zfree(&evsel->name);
	zfree(&evsel->metric_id);
	free(evsel);
	return NULL;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms,
		     u64 alternate_hw_config)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   alternate_hw_config) ? 0 : -ENOMEM;
}

/**
 * parse_aliases - search names for entries beginning or equalling str ignoring
 *                 case. If multiple entries in names match str then the
 *                 longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_state *parse_state);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_state *parse_state);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_state *parse_state,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}
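
/*
 * A worked example of the decoding above (illustrative only, assuming the
 * standard alias tables evsel__hw_cache, evsel__hw_cache_op and
 * evsel__hw_cache_result): "L1-dcache-load-misses" resolves to cache_type
 * PERF_COUNT_HW_CACHE_L1D (0), cache_op PERF_COUNT_HW_CACHE_OP_READ (0) and
 * cache_result PERF_COUNT_HW_CACHE_RESULT_MISS (1), so *config becomes
 * 0 | (0 << 8) | (1 << 16) == 0x10000. A bare "L1-dcache" takes the fallback
 * READ op and ACCESS result.
 */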

/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	return perf_pmu__wildcard_match(pmu, parse_state->pmu_filter) == 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match);

static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name, int column)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get error directly from syscall errno ( > 0),
	 * or from encoded pointer's error ( < 0).
452 */ 453 err = abs(err); 454 455 switch (err) { 456 case EACCES: 457 str = "can't access trace events"; 458 break; 459 case ENOENT: 460 str = "unknown tracepoint"; 461 break; 462 default: 463 str = "failed to add tracepoint"; 464 break; 465 } 466 467 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name); 468 parse_events_error__handle(e, column, strdup(str), strdup(help)); 469 } 470 471 static int add_tracepoint(struct parse_events_state *parse_state, 472 struct list_head *list, 473 const char *sys_name, const char *evt_name, 474 struct parse_events_error *err, 475 struct parse_events_terms *head_config, void *loc_) 476 { 477 YYLTYPE *loc = loc_; 478 struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++, 479 !parse_state->fake_tp); 480 481 if (IS_ERR(evsel)) { 482 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column); 483 return PTR_ERR(evsel); 484 } 485 486 if (head_config) { 487 LIST_HEAD(config_terms); 488 489 if (get_config_terms(head_config, &config_terms)) 490 return -ENOMEM; 491 list_splice(&config_terms, &evsel->config_terms); 492 } 493 494 list_add_tail(&evsel->core.node, list); 495 return 0; 496 } 497 498 struct add_tracepoint_multi_args { 499 struct parse_events_state *parse_state; 500 struct list_head *list; 501 const char *sys_glob; 502 const char *evt_glob; 503 struct parse_events_error *err; 504 struct parse_events_terms *head_config; 505 YYLTYPE *loc; 506 int found; 507 }; 508 509 static int add_tracepoint_multi_event_cb(void *state, const char *sys_name, const char *evt_name) 510 { 511 struct add_tracepoint_multi_args *args = state; 512 int ret; 513 514 if (!strglobmatch(evt_name, args->evt_glob)) 515 return 0; 516 517 args->found++; 518 ret = add_tracepoint(args->parse_state, args->list, sys_name, evt_name, 519 args->err, args->head_config, args->loc); 520 521 return ret; 522 } 523 524 static int add_tracepoint_multi_event(struct add_tracepoint_multi_args *args, const char *sys_name) 525 { 526 if (strpbrk(args->evt_glob, "*?") == NULL) { 527 /* Not a glob. */ 528 args->found++; 529 return add_tracepoint(args->parse_state, args->list, sys_name, args->evt_glob, 530 args->err, args->head_config, args->loc); 531 } 532 533 return tp_pmu__for_each_tp_event(sys_name, args, add_tracepoint_multi_event_cb); 534 } 535 536 static int add_tracepoint_multi_sys_cb(void *state, const char *sys_name) 537 { 538 struct add_tracepoint_multi_args *args = state; 539 540 if (!strglobmatch(sys_name, args->sys_glob)) 541 return 0; 542 543 return add_tracepoint_multi_event(args, sys_name); 544 } 545 546 static int add_tracepoint_multi_sys(struct parse_events_state *parse_state, 547 struct list_head *list, 548 const char *sys_glob, const char *evt_glob, 549 struct parse_events_error *err, 550 struct parse_events_terms *head_config, YYLTYPE *loc) 551 { 552 struct add_tracepoint_multi_args args = { 553 .parse_state = parse_state, 554 .list = list, 555 .sys_glob = sys_glob, 556 .evt_glob = evt_glob, 557 .err = err, 558 .head_config = head_config, 559 .loc = loc, 560 .found = 0, 561 }; 562 int ret; 563 564 if (strpbrk(sys_glob, "*?") == NULL) { 565 /* Not a glob. 
	 */
		ret = add_tracepoint_multi_event(&args, sys_glob);
	} else {
		ret = tp_pmu__for_each_tp_sys(&args, add_tracepoint_multi_sys_cb);
	}
	if (args.found == 0) {
		tracepoint_error(err, ENOENT, sys_glob, evt_glob, loc->first_column);
		return -ENOENT;
	}
	return ret;
}

size_t default_breakpoint_len(void)
{
#if defined(__i386__)
	static int len;

	if (len == 0) {
		struct perf_env env = {};

		perf_env__init(&env);
		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
		perf_env__exit(&env);
	}
	return len;
#elif defined(__aarch64__)
	return 4;
#else
	return sizeof(long);
#endif
}

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
				struct list_head *list,
				u64 addr, char *type, u64 len,
				struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = default_breakpoint_len();
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state, config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);

	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
			 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}
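
/*
 * Illustrative sketch of how the two functions above cooperate (the "mem:"
 * event syntax itself is handled by the lexer/grammar, so the exact spelling
 * is an assumption here): for something like "mem:0x1000:rw",
 * parse_breakpoint_type() sets bp_type to HW_BREAKPOINT_R | HW_BREAKPOINT_W
 * and, because no length was given and the type is not HW_BREAKPOINT_X,
 * bp_len falls back to HW_BREAKPOINT_LEN_4. An execute breakpoint such as
 * "mem:0x1000:x" instead defaults to default_breakpoint_len(): 4 on arm64,
 * otherwise the native long size, with a kernel-bitness check on i386.
 */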
strdup("expected numeric value") 690 : strdup("expected string value"), 691 NULL); 692 } 693 return -EINVAL; 694 } 695 696 static bool config_term_shrinked; 697 698 const char *parse_events__term_type_str(enum parse_events__term_type term_type) 699 { 700 /* 701 * Update according to parse-events.l 702 */ 703 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = { 704 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>", 705 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config", 706 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1", 707 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2", 708 [PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3", 709 [PARSE_EVENTS__TERM_TYPE_CONFIG4] = "config4", 710 [PARSE_EVENTS__TERM_TYPE_NAME] = "name", 711 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period", 712 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq", 713 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type", 714 [PARSE_EVENTS__TERM_TYPE_TIME] = "time", 715 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph", 716 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size", 717 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit", 718 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit", 719 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack", 720 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr", 721 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite", 722 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite", 723 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config", 724 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore", 725 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output", 726 [PARSE_EVENTS__TERM_TYPE_AUX_ACTION] = "aux-action", 727 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size", 728 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id", 729 [PARSE_EVENTS__TERM_TYPE_RAW] = "raw", 730 [PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG] = "legacy-hardware-config", 731 [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG] = "legacy-cache-config", 732 [PARSE_EVENTS__TERM_TYPE_CPU] = "cpu", 733 [PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV] = "ratio-to-prev", 734 }; 735 if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR) 736 return "unknown term"; 737 738 return config_term_names[term_type]; 739 } 740 741 static bool 742 config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err) 743 { 744 char *err_str; 745 746 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) { 747 parse_events_error__handle(err, -1, 748 strdup("Invalid term_type"), NULL); 749 return false; 750 } 751 if (!config_term_shrinked) 752 return true; 753 754 switch (term_type) { 755 case PARSE_EVENTS__TERM_TYPE_CONFIG: 756 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 757 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 758 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 759 case PARSE_EVENTS__TERM_TYPE_CONFIG4: 760 case PARSE_EVENTS__TERM_TYPE_NAME: 761 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 762 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 763 case PARSE_EVENTS__TERM_TYPE_PERCORE: 764 case PARSE_EVENTS__TERM_TYPE_CPU: 765 return true; 766 case PARSE_EVENTS__TERM_TYPE_USER: 767 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 768 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 769 case PARSE_EVENTS__TERM_TYPE_TIME: 770 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 771 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 772 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 773 case PARSE_EVENTS__TERM_TYPE_INHERIT: 774 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 775 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 776 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 777 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 778 
case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 779 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 780 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 781 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 782 case PARSE_EVENTS__TERM_TYPE_RAW: 783 case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV: 784 case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG: 785 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG: 786 default: 787 if (!err) 788 return false; 789 790 /* term_type is validated so indexing is safe */ 791 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'", 792 parse_events__term_type_str(term_type)) >= 0) 793 parse_events_error__handle(err, -1, err_str, NULL); 794 return false; 795 } 796 } 797 798 void parse_events__shrink_config_terms(void) 799 { 800 config_term_shrinked = true; 801 } 802 803 static int config_term_common(struct perf_event_attr *attr, 804 struct parse_events_term *term, 805 struct parse_events_state *parse_state) 806 { 807 #define CHECK_TYPE_VAL(type) \ 808 do { \ 809 if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_ ## type)) \ 810 return -EINVAL; \ 811 } while (0) 812 813 switch (term->type_term) { 814 case PARSE_EVENTS__TERM_TYPE_CONFIG: 815 CHECK_TYPE_VAL(NUM); 816 attr->config = term->val.num; 817 break; 818 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 819 CHECK_TYPE_VAL(NUM); 820 attr->config1 = term->val.num; 821 break; 822 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 823 CHECK_TYPE_VAL(NUM); 824 attr->config2 = term->val.num; 825 break; 826 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 827 CHECK_TYPE_VAL(NUM); 828 attr->config3 = term->val.num; 829 break; 830 case PARSE_EVENTS__TERM_TYPE_CONFIG4: 831 CHECK_TYPE_VAL(NUM); 832 attr->config4 = term->val.num; 833 break; 834 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 835 CHECK_TYPE_VAL(NUM); 836 break; 837 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 838 CHECK_TYPE_VAL(NUM); 839 break; 840 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 841 CHECK_TYPE_VAL(STR); 842 if (strcmp(term->val.str, "no") && 843 parse_branch_str(term->val.str, 844 &attr->branch_sample_type)) { 845 parse_events_error__handle(parse_state->error, term->err_val, 846 strdup("invalid branch sample type"), 847 NULL); 848 return -EINVAL; 849 } 850 break; 851 case PARSE_EVENTS__TERM_TYPE_TIME: 852 CHECK_TYPE_VAL(NUM); 853 if (term->val.num > 1) { 854 parse_events_error__handle(parse_state->error, term->err_val, 855 strdup("expected 0 or 1"), 856 NULL); 857 return -EINVAL; 858 } 859 break; 860 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 861 CHECK_TYPE_VAL(STR); 862 break; 863 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 864 CHECK_TYPE_VAL(NUM); 865 break; 866 case PARSE_EVENTS__TERM_TYPE_INHERIT: 867 CHECK_TYPE_VAL(NUM); 868 break; 869 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 870 CHECK_TYPE_VAL(NUM); 871 break; 872 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 873 CHECK_TYPE_VAL(NUM); 874 break; 875 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 876 CHECK_TYPE_VAL(NUM); 877 break; 878 case PARSE_EVENTS__TERM_TYPE_NAME: 879 CHECK_TYPE_VAL(STR); 880 break; 881 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 882 CHECK_TYPE_VAL(STR); 883 break; 884 case PARSE_EVENTS__TERM_TYPE_RAW: 885 CHECK_TYPE_VAL(STR); 886 break; 887 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 888 CHECK_TYPE_VAL(NUM); 889 break; 890 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 891 CHECK_TYPE_VAL(NUM); 892 break; 893 case PARSE_EVENTS__TERM_TYPE_PERCORE: 894 CHECK_TYPE_VAL(NUM); 895 if ((unsigned int)term->val.num > 1) { 896 parse_events_error__handle(parse_state->error, term->err_val, 897 strdup("expected 0 or 1"), 898 NULL); 899 return -EINVAL; 
900 } 901 break; 902 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 903 CHECK_TYPE_VAL(NUM); 904 break; 905 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 906 CHECK_TYPE_VAL(STR); 907 break; 908 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 909 CHECK_TYPE_VAL(NUM); 910 if (term->val.num > UINT_MAX) { 911 parse_events_error__handle(parse_state->error, term->err_val, 912 strdup("too big"), 913 NULL); 914 return -EINVAL; 915 } 916 break; 917 case PARSE_EVENTS__TERM_TYPE_CPU: { 918 struct perf_cpu_map *map; 919 920 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { 921 if (term->val.num >= (u64)cpu__max_present_cpu().cpu) { 922 parse_events_error__handle(parse_state->error, term->err_val, 923 strdup("too big"), 924 /*help=*/NULL); 925 return -EINVAL; 926 } 927 break; 928 } 929 assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR); 930 if (perf_pmus__find(term->val.str) != NULL) 931 break; 932 933 map = perf_cpu_map__new(term->val.str); 934 if (!map && !parse_state->fake_pmu) { 935 parse_events_error__handle(parse_state->error, term->err_val, 936 strdup("not a valid PMU or CPU number"), 937 /*help=*/NULL); 938 return -EINVAL; 939 } 940 perf_cpu_map__put(map); 941 break; 942 } 943 case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV: 944 CHECK_TYPE_VAL(STR); 945 if (strtod(term->val.str, NULL) <= 0) { 946 parse_events_error__handle(parse_state->error, term->err_val, 947 strdup("zero or negative"), 948 NULL); 949 return -EINVAL; 950 } 951 if (errno == ERANGE) { 952 parse_events_error__handle(parse_state->error, term->err_val, 953 strdup("too big"), 954 NULL); 955 return -EINVAL; 956 } 957 break; 958 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 959 case PARSE_EVENTS__TERM_TYPE_USER: 960 case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG: 961 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG: 962 default: 963 parse_events_error__handle(parse_state->error, term->err_term, 964 strdup(parse_events__term_type_str(term->type_term)), 965 parse_events_formats_error_string(NULL)); 966 return -EINVAL; 967 } 968 969 /* 970 * Check term availability after basic checking so 971 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered. 972 * 973 * If check availability at the entry of this function, 974 * user will see "'<sysfs term>' is not usable in 'perf stat'" 975 * if an invalid config term is provided for legacy events 976 * (for example, instructions/badterm/...), which is confusing. 977 */ 978 if (!config_term_avail(term->type_term, parse_state->error)) 979 return -EINVAL; 980 return 0; 981 #undef CHECK_TYPE_VAL 982 } 983 984 static bool check_pmu_is_core(__u32 type, const struct parse_events_term *term, 985 struct parse_events_error *err) 986 { 987 struct perf_pmu *pmu = NULL; 988 989 /* Avoid loading all PMUs with perf_pmus__find_by_type, just scan the core ones. 
	 */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (pmu->type == type)
			return true;
	}
	parse_events_error__handle(err, term->err_val,
				   strdup("needs a core PMU"),
				   NULL);
	return false;
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_state *parse_state)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG) {
		if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM))
			return -EINVAL;
		if (term->val.num >= PERF_COUNT_HW_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		if (!check_pmu_is_core(attr->type, term, parse_state->error))
			return -EINVAL;
		attr->config = term->val.num;
		if (perf_pmus__supports_extended_type())
			attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT;
		attr->type = PERF_TYPE_HARDWARE;
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG) {
		int cache_type, cache_op, cache_result;

		if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM))
			return -EINVAL;
		cache_type = term->val.num & 0xFF;
		cache_op = (term->val.num >> 8) & 0xFF;
		cache_result = (term->val.num >> 16) & 0xFF;
		if ((term->val.num & ~0xFFFFFF) ||
		    cache_type >= PERF_COUNT_HW_CACHE_MAX ||
		    cache_op >= PERF_COUNT_HW_CACHE_OP_MAX ||
		    cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		if (!check_pmu_is_core(attr->type, term, parse_state->error))
			return -EINVAL;
		attr->config = term->val.num;
		if (perf_pmus__supports_extended_type())
			attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT;
		attr->type = PERF_TYPE_HW_CACHE;
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, parse_state);
}
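
/*
 * Example of the legacy-hardware-config rewrite above (the event string that
 * generates such a term comes from the grammar, so treat the spelling as an
 * assumption): for a hybrid-system spelling like "cpu_atom/cycles/",
 * attr->type arrives here as the cpu_atom PMU type and the term value is
 * PERF_COUNT_HW_CPU_CYCLES. The code rewrites attr->type to PERF_TYPE_HARDWARE
 * and, when extended types are supported, encodes the original PMU type in the
 * upper config bits via PERF_PMU_TYPE_SHIFT, so the kernel still opens the
 * event on the intended core PMU.
 */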

static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_state *parse_state)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, parse_state);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_CONFIG4:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_CPU:
	case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
	default:
		parse_events_error__handle(parse_state->error, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   strdup("valid terms: call-graph,stack-size\n"));
		return -EINVAL;
	}

	return 0;
}

static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_state *parse_state,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &head->terms, list)
		if (config_term(attr, term, parse_state))
			return -EINVAL;

	return 0;
}

static struct evsel_config_term *add_config_term(enum evsel_term_type type,
						 struct list_head *head_terms,
						 bool weak, char *str, u64 val)
{
	struct evsel_config_term *t;

	t = zalloc(sizeof(*t));
	if (!t)
		return NULL;

	INIT_LIST_HEAD(&t->list);
	t->type = type;
	t->weak = weak;

	switch (type) {
	case EVSEL__CONFIG_TERM_PERIOD:
	case EVSEL__CONFIG_TERM_FREQ:
	case EVSEL__CONFIG_TERM_STACK_USER:
	case EVSEL__CONFIG_TERM_USR_CHG_CONFIG:
	case EVSEL__CONFIG_TERM_USR_CHG_CONFIG1:
	case EVSEL__CONFIG_TERM_USR_CHG_CONFIG2:
	case EVSEL__CONFIG_TERM_USR_CHG_CONFIG3:
	case EVSEL__CONFIG_TERM_USR_CHG_CONFIG4:
		t->val.val = val;
		break;
	case EVSEL__CONFIG_TERM_TIME:
		t->val.time = val;
		break;
	case EVSEL__CONFIG_TERM_INHERIT:
		t->val.inherit = val;
		break;
	case EVSEL__CONFIG_TERM_OVERWRITE:
		t->val.overwrite = val;
		break;
	case EVSEL__CONFIG_TERM_MAX_STACK:
		t->val.max_stack = val;
		break;
	case EVSEL__CONFIG_TERM_MAX_EVENTS:
		t->val.max_events = val;
		break;
	case EVSEL__CONFIG_TERM_PERCORE:
		t->val.percore = val;
break; 1161 case EVSEL__CONFIG_TERM_AUX_OUTPUT: 1162 t->val.aux_output = val; 1163 break; 1164 case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE: 1165 t->val.aux_sample_size = val; 1166 break; 1167 case EVSEL__CONFIG_TERM_CALLGRAPH: 1168 case EVSEL__CONFIG_TERM_BRANCH: 1169 case EVSEL__CONFIG_TERM_DRV_CFG: 1170 case EVSEL__CONFIG_TERM_RATIO_TO_PREV: 1171 case EVSEL__CONFIG_TERM_AUX_ACTION: 1172 if (str) { 1173 t->val.str = strdup(str); 1174 if (!t->val.str) { 1175 zfree(&t); 1176 return NULL; 1177 } 1178 t->free_str = true; 1179 } 1180 break; 1181 default: 1182 t->val.val = val; 1183 break; 1184 } 1185 1186 list_add_tail(&t->list, head_terms); 1187 return t; 1188 } 1189 1190 static int get_config_terms(const struct parse_events_terms *head_config, 1191 struct list_head *head_terms) 1192 { 1193 struct parse_events_term *term; 1194 1195 list_for_each_entry(term, &head_config->terms, list) { 1196 struct evsel_config_term *new_term; 1197 enum evsel_term_type new_type; 1198 bool str_type = false; 1199 u64 val = 0; 1200 1201 switch (term->type_term) { 1202 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1203 new_type = EVSEL__CONFIG_TERM_PERIOD; 1204 val = term->val.num; 1205 break; 1206 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1207 new_type = EVSEL__CONFIG_TERM_FREQ; 1208 val = term->val.num; 1209 break; 1210 case PARSE_EVENTS__TERM_TYPE_TIME: 1211 new_type = EVSEL__CONFIG_TERM_TIME; 1212 val = term->val.num; 1213 break; 1214 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1215 new_type = EVSEL__CONFIG_TERM_CALLGRAPH; 1216 str_type = true; 1217 break; 1218 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1219 new_type = EVSEL__CONFIG_TERM_BRANCH; 1220 str_type = true; 1221 break; 1222 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1223 new_type = EVSEL__CONFIG_TERM_STACK_USER; 1224 val = term->val.num; 1225 break; 1226 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1227 new_type = EVSEL__CONFIG_TERM_INHERIT; 1228 val = term->val.num ? 1 : 0; 1229 break; 1230 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1231 new_type = EVSEL__CONFIG_TERM_INHERIT; 1232 val = term->val.num ? 0 : 1; 1233 break; 1234 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1235 new_type = EVSEL__CONFIG_TERM_MAX_STACK; 1236 val = term->val.num; 1237 break; 1238 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1239 new_type = EVSEL__CONFIG_TERM_MAX_EVENTS; 1240 val = term->val.num; 1241 break; 1242 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1243 new_type = EVSEL__CONFIG_TERM_OVERWRITE; 1244 val = term->val.num ? 1 : 0; 1245 break; 1246 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1247 new_type = EVSEL__CONFIG_TERM_OVERWRITE; 1248 val = term->val.num ? 0 : 1; 1249 break; 1250 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1251 new_type = EVSEL__CONFIG_TERM_DRV_CFG; 1252 str_type = true; 1253 break; 1254 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1255 new_type = EVSEL__CONFIG_TERM_PERCORE; 1256 val = term->val.num ? true : false; 1257 break; 1258 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1259 new_type = EVSEL__CONFIG_TERM_AUX_OUTPUT; 1260 val = term->val.num ? 
1 : 0; 1261 break; 1262 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 1263 new_type = EVSEL__CONFIG_TERM_AUX_ACTION; 1264 str_type = true; 1265 break; 1266 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1267 new_type = EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE; 1268 val = term->val.num; 1269 break; 1270 case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV: 1271 new_type = EVSEL__CONFIG_TERM_RATIO_TO_PREV; 1272 str_type = true; 1273 break; 1274 case PARSE_EVENTS__TERM_TYPE_USER: 1275 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1276 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 1277 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 1278 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 1279 case PARSE_EVENTS__TERM_TYPE_CONFIG4: 1280 case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG: 1281 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG: 1282 case PARSE_EVENTS__TERM_TYPE_NAME: 1283 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1284 case PARSE_EVENTS__TERM_TYPE_RAW: 1285 case PARSE_EVENTS__TERM_TYPE_CPU: 1286 default: 1287 /* Don't add a new term for these ones */ 1288 continue; 1289 } 1290 1291 /* 1292 * Note: Members evsel_config_term::val and 1293 * parse_events_term::val are unions and endianness needs 1294 * to be taken into account when changing such union members. 1295 */ 1296 new_term = add_config_term(new_type, head_terms, term->weak, 1297 str_type ? term->val.str : NULL, val); 1298 if (!new_term) 1299 return -ENOMEM; 1300 } 1301 return 0; 1302 } 1303 1304 static int add_cfg_chg(const struct perf_pmu *pmu, 1305 const struct parse_events_terms *head_config, 1306 struct list_head *head_terms, 1307 int format_type, 1308 enum parse_events__term_type term_type, 1309 enum evsel_term_type new_term_type) 1310 { 1311 struct parse_events_term *term; 1312 u64 bits = 0; 1313 int type; 1314 1315 list_for_each_entry(term, &head_config->terms, list) { 1316 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER) { 1317 type = perf_pmu__format_type(pmu, term->config); 1318 if (type != format_type) 1319 continue; 1320 bits |= perf_pmu__format_bits(pmu, term->config); 1321 } else if (term->type_term == term_type) { 1322 bits = ~(u64)0; 1323 } 1324 } 1325 1326 if (bits) { 1327 struct evsel_config_term *new_term; 1328 1329 new_term = add_config_term(new_term_type, head_terms, false, NULL, bits); 1330 if (!new_term) 1331 return -ENOMEM; 1332 } 1333 1334 return 0; 1335 } 1336 1337 /* 1338 * Add EVSEL__CONFIG_TERM_USR_CFG_CONFIGn where cfg_chg will have a bit set for 1339 * each bit of attr->configN that the user has changed. 
1340 */ 1341 static int get_config_chgs(const struct perf_pmu *pmu, 1342 const struct parse_events_terms *head_config, 1343 struct list_head *head_terms) 1344 { 1345 int ret; 1346 1347 ret = add_cfg_chg(pmu, head_config, head_terms, 1348 PERF_PMU_FORMAT_VALUE_CONFIG, 1349 PARSE_EVENTS__TERM_TYPE_CONFIG, 1350 EVSEL__CONFIG_TERM_USR_CHG_CONFIG); 1351 if (ret) 1352 return ret; 1353 1354 ret = add_cfg_chg(pmu, head_config, head_terms, 1355 PERF_PMU_FORMAT_VALUE_CONFIG1, 1356 PARSE_EVENTS__TERM_TYPE_CONFIG1, 1357 EVSEL__CONFIG_TERM_USR_CHG_CONFIG1); 1358 if (ret) 1359 return ret; 1360 1361 ret = add_cfg_chg(pmu, head_config, head_terms, 1362 PERF_PMU_FORMAT_VALUE_CONFIG2, 1363 PARSE_EVENTS__TERM_TYPE_CONFIG2, 1364 EVSEL__CONFIG_TERM_USR_CHG_CONFIG2); 1365 if (ret) 1366 return ret; 1367 1368 ret = add_cfg_chg(pmu, head_config, head_terms, 1369 PERF_PMU_FORMAT_VALUE_CONFIG3, 1370 PARSE_EVENTS__TERM_TYPE_CONFIG3, 1371 EVSEL__CONFIG_TERM_USR_CHG_CONFIG3); 1372 if (ret) 1373 return ret; 1374 1375 return add_cfg_chg(pmu, head_config, head_terms, 1376 PERF_PMU_FORMAT_VALUE_CONFIG4, 1377 PARSE_EVENTS__TERM_TYPE_CONFIG4, 1378 EVSEL__CONFIG_TERM_USR_CHG_CONFIG4); 1379 } 1380 1381 int parse_events_add_tracepoint(struct parse_events_state *parse_state, 1382 struct list_head *list, 1383 const char *sys, const char *event, 1384 struct parse_events_error *err, 1385 struct parse_events_terms *head_config, void *loc_) 1386 { 1387 YYLTYPE *loc = loc_; 1388 1389 if (head_config) { 1390 struct perf_event_attr attr; 1391 1392 if (config_attr(&attr, head_config, parse_state, config_term_tracepoint)) 1393 return -EINVAL; 1394 } 1395 1396 return add_tracepoint_multi_sys(parse_state, list, sys, event, 1397 err, head_config, loc); 1398 } 1399 1400 static int __parse_events_add_numeric(struct parse_events_state *parse_state, 1401 struct list_head *list, 1402 struct perf_pmu *pmu, u32 type, u32 extended_type, 1403 u64 config, const struct parse_events_terms *head_config, 1404 struct evsel *first_wildcard_match) 1405 { 1406 struct perf_event_attr attr; 1407 LIST_HEAD(config_terms); 1408 const char *name, *metric_id; 1409 struct perf_cpu_map *cpus; 1410 int ret; 1411 1412 memset(&attr, 0, sizeof(attr)); 1413 attr.type = type; 1414 attr.config = config; 1415 if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) { 1416 assert(perf_pmus__supports_extended_type()); 1417 attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT; 1418 } 1419 1420 if (head_config) { 1421 if (config_attr(&attr, head_config, parse_state, config_term_common)) 1422 return -EINVAL; 1423 1424 if (get_config_terms(head_config, &config_terms)) 1425 return -ENOMEM; 1426 } 1427 1428 name = get_config_name(head_config); 1429 metric_id = get_config_metric_id(head_config); 1430 cpus = get_config_cpu(head_config, parse_state->fake_pmu); 1431 ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name, 1432 metric_id, pmu, &config_terms, first_wildcard_match, 1433 cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM; 1434 perf_cpu_map__put(cpus); 1435 free_config_terms(&config_terms); 1436 return ret; 1437 } 1438 1439 int parse_events_add_numeric(struct parse_events_state *parse_state, 1440 struct list_head *list, 1441 u32 type, u64 config, 1442 const struct parse_events_terms *head_config, 1443 bool wildcard) 1444 { 1445 struct perf_pmu *pmu = NULL; 1446 bool found_supported = false; 1447 1448 /* Wildcards on numeric values are only supported by core PMUs. 
*/ 1449 if (wildcard && perf_pmus__supports_extended_type()) { 1450 struct evsel *first_wildcard_match = NULL; 1451 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 1452 int ret; 1453 1454 found_supported = true; 1455 if (parse_events__filter_pmu(parse_state, pmu)) 1456 continue; 1457 1458 ret = __parse_events_add_numeric(parse_state, list, pmu, 1459 type, pmu->type, 1460 config, head_config, 1461 first_wildcard_match); 1462 if (ret) 1463 return ret; 1464 if (first_wildcard_match == NULL) 1465 first_wildcard_match = 1466 container_of(list->prev, struct evsel, core.node); 1467 } 1468 if (found_supported) 1469 return 0; 1470 } 1471 return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type), 1472 type, /*extended_type=*/0, config, head_config, 1473 /*first_wildcard_match=*/NULL); 1474 } 1475 1476 static bool config_term_percore(struct list_head *config_terms) 1477 { 1478 struct evsel_config_term *term; 1479 1480 list_for_each_entry(term, config_terms, list) { 1481 if (term->type == EVSEL__CONFIG_TERM_PERCORE) 1482 return term->val.percore; 1483 } 1484 1485 return false; 1486 } 1487 1488 static int parse_events_add_pmu(struct parse_events_state *parse_state, 1489 struct list_head *list, struct perf_pmu *pmu, 1490 const struct parse_events_terms *const_parsed_terms, 1491 struct evsel *first_wildcard_match) 1492 { 1493 u64 alternate_hw_config = PERF_COUNT_HW_MAX; 1494 struct perf_event_attr attr; 1495 struct perf_pmu_info info; 1496 struct evsel *evsel; 1497 struct parse_events_error *err = parse_state->error; 1498 LIST_HEAD(config_terms); 1499 struct parse_events_terms parsed_terms; 1500 bool alias_rewrote_terms = false; 1501 struct perf_cpu_map *term_cpu = NULL; 1502 1503 if (verbose > 1) { 1504 struct strbuf sb; 1505 1506 strbuf_init(&sb, /*hint=*/ 0); 1507 if (pmu->selectable && const_parsed_terms && 1508 list_empty(&const_parsed_terms->terms)) { 1509 strbuf_addf(&sb, "%s//", pmu->name); 1510 } else { 1511 strbuf_addf(&sb, "%s/", pmu->name); 1512 parse_events_terms__to_strbuf(const_parsed_terms, &sb); 1513 strbuf_addch(&sb, '/'); 1514 } 1515 fprintf(stderr, "Attempt to add: %s\n", sb.buf); 1516 strbuf_release(&sb); 1517 } 1518 1519 memset(&attr, 0, sizeof(attr)); 1520 if (pmu->perf_event_attr_init_default) 1521 pmu->perf_event_attr_init_default(pmu, &attr); 1522 1523 attr.type = pmu->type; 1524 1525 if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) { 1526 evsel = __add_event(list, &parse_state->idx, &attr, 1527 /*init_attr=*/true, /*name=*/NULL, 1528 /*metric_id=*/NULL, pmu, 1529 /*config_terms=*/NULL, first_wildcard_match, 1530 /*cpu_list=*/NULL, alternate_hw_config); 1531 return evsel ? 0 : -ENOMEM; 1532 } 1533 1534 parse_events_terms__init(&parsed_terms); 1535 if (const_parsed_terms) { 1536 int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms); 1537 1538 if (ret) 1539 return ret; 1540 } 1541 fix_raw(&parsed_terms, pmu); 1542 1543 /* Configure attr/terms with a known PMU, this will set hardcoded terms. */ 1544 if (config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) { 1545 parse_events_terms__exit(&parsed_terms); 1546 return -EINVAL; 1547 } 1548 1549 /* Look for event names in the terms and rewrite into format based terms. 
*/ 1550 if (perf_pmu__check_alias(pmu, &parsed_terms, 1551 &info, &alias_rewrote_terms, 1552 &alternate_hw_config, err)) { 1553 parse_events_terms__exit(&parsed_terms); 1554 return -EINVAL; 1555 } 1556 1557 if (verbose > 1) { 1558 struct strbuf sb; 1559 1560 strbuf_init(&sb, /*hint=*/ 0); 1561 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1562 fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf); 1563 strbuf_release(&sb); 1564 } 1565 1566 /* Configure attr/terms again if an alias was expanded. */ 1567 if (alias_rewrote_terms && 1568 config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) { 1569 parse_events_terms__exit(&parsed_terms); 1570 return -EINVAL; 1571 } 1572 1573 if (get_config_terms(&parsed_terms, &config_terms)) { 1574 parse_events_terms__exit(&parsed_terms); 1575 return -ENOMEM; 1576 } 1577 1578 /* Record which bits of attr->config were changed by the user. */ 1579 if (get_config_chgs(pmu, &parsed_terms, &config_terms)) { 1580 parse_events_terms__exit(&parsed_terms); 1581 return -ENOMEM; 1582 } 1583 1584 /* Skip configuring hard coded terms that were applied by config_attr. */ 1585 if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false, 1586 parse_state->error)) { 1587 free_config_terms(&config_terms); 1588 parse_events_terms__exit(&parsed_terms); 1589 return -EINVAL; 1590 } 1591 1592 term_cpu = get_config_cpu(&parsed_terms, parse_state->fake_pmu); 1593 evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true, 1594 get_config_name(&parsed_terms), 1595 get_config_metric_id(&parsed_terms), pmu, 1596 &config_terms, first_wildcard_match, term_cpu, alternate_hw_config); 1597 perf_cpu_map__put(term_cpu); 1598 if (!evsel) { 1599 parse_events_terms__exit(&parsed_terms); 1600 return -ENOMEM; 1601 } 1602 1603 if (evsel->name) 1604 evsel->use_config_name = true; 1605 1606 evsel->percore = config_term_percore(&evsel->config_terms); 1607 1608 parse_events_terms__exit(&parsed_terms); 1609 free((char *)evsel->unit); 1610 evsel->unit = strdup(info.unit); 1611 evsel->scale = info.scale; 1612 evsel->per_pkg = info.per_pkg; 1613 evsel->snapshot = info.snapshot; 1614 evsel->retirement_latency.mean = info.retirement_latency_mean; 1615 evsel->retirement_latency.min = info.retirement_latency_min; 1616 evsel->retirement_latency.max = info.retirement_latency_max; 1617 1618 return 0; 1619 } 1620 1621 int parse_events_multi_pmu_add(struct parse_events_state *parse_state, 1622 const char *event_name, 1623 const struct parse_events_terms *const_parsed_terms, 1624 struct list_head **listp, void *loc_) 1625 { 1626 struct parse_events_term *term; 1627 struct list_head *list = NULL; 1628 struct perf_pmu *pmu = NULL; 1629 YYLTYPE *loc = loc_; 1630 int ok = 0; 1631 const char *config; 1632 struct parse_events_terms parsed_terms; 1633 struct evsel *first_wildcard_match = NULL; 1634 1635 *listp = NULL; 1636 1637 parse_events_terms__init(&parsed_terms); 1638 if (const_parsed_terms) { 1639 int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms); 1640 1641 if (ret) 1642 return ret; 1643 } 1644 1645 config = strdup(event_name); 1646 if (!config) 1647 goto out_err; 1648 1649 if (parse_events_term__num(&term, 1650 PARSE_EVENTS__TERM_TYPE_USER, 1651 config, /*num=*/1, /*novalue=*/true, 1652 loc, /*loc_val=*/NULL) < 0) { 1653 zfree(&config); 1654 goto out_err; 1655 } 1656 list_add_tail(&term->list, &parsed_terms.terms); 1657 1658 /* Add it for all PMUs that support the alias */ 1659 list = malloc(sizeof(struct list_head)); 1660 if (!list) 1661 
goto out_err; 1662 1663 INIT_LIST_HEAD(list); 1664 1665 while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) { 1666 1667 if (parse_events__filter_pmu(parse_state, pmu)) 1668 continue; 1669 1670 if (!perf_pmu__have_event(pmu, event_name)) 1671 continue; 1672 1673 if (!parse_events_add_pmu(parse_state, list, pmu, 1674 &parsed_terms, first_wildcard_match)) { 1675 struct strbuf sb; 1676 1677 strbuf_init(&sb, /*hint=*/ 0); 1678 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1679 pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf); 1680 strbuf_release(&sb); 1681 ok++; 1682 } 1683 if (first_wildcard_match == NULL) 1684 first_wildcard_match = container_of(list->prev, struct evsel, core.node); 1685 } 1686 1687 if (parse_state->fake_pmu) { 1688 if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms, 1689 first_wildcard_match)) { 1690 struct strbuf sb; 1691 1692 strbuf_init(&sb, /*hint=*/ 0); 1693 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1694 pr_debug("%s -> fake/%s/\n", event_name, sb.buf); 1695 strbuf_release(&sb); 1696 ok++; 1697 } 1698 } 1699 1700 out_err: 1701 parse_events_terms__exit(&parsed_terms); 1702 if (ok) 1703 *listp = list; 1704 else 1705 free(list); 1706 1707 return ok ? 0 : -1; 1708 } 1709 1710 int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state, 1711 const char *event_or_pmu, 1712 const struct parse_events_terms *const_parsed_terms, 1713 struct list_head **listp, 1714 void *loc_) 1715 { 1716 YYLTYPE *loc = loc_; 1717 struct perf_pmu *pmu; 1718 int ok = 0; 1719 char *help; 1720 struct evsel *first_wildcard_match = NULL; 1721 1722 *listp = malloc(sizeof(**listp)); 1723 if (!*listp) 1724 return -ENOMEM; 1725 1726 INIT_LIST_HEAD(*listp); 1727 1728 /* Attempt to add to list assuming event_or_pmu is a PMU name. */ 1729 pmu = perf_pmus__find(event_or_pmu); 1730 if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms, 1731 first_wildcard_match)) 1732 return 0; 1733 1734 if (parse_state->fake_pmu) { 1735 if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(), 1736 const_parsed_terms, 1737 first_wildcard_match)) 1738 return 0; 1739 } 1740 1741 pmu = NULL; 1742 /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */ 1743 while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) { 1744 1745 if (parse_events__filter_pmu(parse_state, pmu)) 1746 continue; 1747 1748 if (!parse_events_add_pmu(parse_state, *listp, pmu, 1749 const_parsed_terms, 1750 first_wildcard_match)) { 1751 ok++; 1752 parse_state->wild_card_pmus = true; 1753 } 1754 if (first_wildcard_match == NULL) { 1755 first_wildcard_match = 1756 container_of((*listp)->prev, struct evsel, core.node); 1757 } 1758 } 1759 if (ok) 1760 return 0; 1761 1762 /* Failure to add, assume event_or_pmu is an event name. 
*/ 1763 zfree(listp); 1764 if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, 1765 const_parsed_terms, listp, loc)) 1766 return 0; 1767 1768 if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0) 1769 help = NULL; 1770 parse_events_error__handle(parse_state->error, loc->first_column, 1771 strdup("Bad event or PMU"), 1772 help); 1773 zfree(listp); 1774 return -EINVAL; 1775 } 1776 1777 void parse_events__set_leader(char *name, struct list_head *list) 1778 { 1779 struct evsel *leader; 1780 1781 if (list_empty(list)) { 1782 WARN_ONCE(true, "WARNING: failed to set leader: empty list"); 1783 return; 1784 } 1785 1786 leader = list_first_entry(list, struct evsel, core.node); 1787 __perf_evlist__set_leader(list, &leader->core); 1788 zfree(&leader->group_name); 1789 leader->group_name = name; 1790 } 1791 1792 static int parse_events__modifier_list(struct parse_events_state *parse_state, 1793 YYLTYPE *loc, 1794 struct list_head *list, 1795 struct parse_events_modifier mod, 1796 bool group) 1797 { 1798 struct evsel *evsel; 1799 1800 if (!group && mod.weak) { 1801 parse_events_error__handle(parse_state->error, loc->first_column, 1802 strdup("Weak modifier is for use with groups"), NULL); 1803 return -EINVAL; 1804 } 1805 1806 __evlist__for_each_entry(list, evsel) { 1807 /* Translate modifiers into the equivalent evsel excludes. */ 1808 int eu = group ? evsel->core.attr.exclude_user : 0; 1809 int ek = group ? evsel->core.attr.exclude_kernel : 0; 1810 int eh = group ? evsel->core.attr.exclude_hv : 0; 1811 int eH = group ? evsel->core.attr.exclude_host : 0; 1812 int eG = group ? evsel->core.attr.exclude_guest : 0; 1813 int exclude = eu | ek | eh; 1814 int exclude_GH = eG | eH; 1815 1816 if (mod.user) { 1817 if (!exclude) 1818 exclude = eu = ek = eh = 1; 1819 eu = 0; 1820 } 1821 if (mod.kernel) { 1822 if (!exclude) 1823 exclude = eu = ek = eh = 1; 1824 ek = 0; 1825 } 1826 if (mod.hypervisor) { 1827 if (!exclude) 1828 exclude = eu = ek = eh = 1; 1829 eh = 0; 1830 } 1831 if (mod.guest) { 1832 if (!exclude_GH) 1833 exclude_GH = eG = eH = 1; 1834 eG = 0; 1835 } 1836 if (mod.host) { 1837 if (!exclude_GH) 1838 exclude_GH = eG = eH = 1; 1839 eH = 0; 1840 } 1841 if (!exclude_GH && exclude_GH_default) { 1842 if (perf_host) 1843 eG = 1; 1844 else if (perf_guest) 1845 eH = 1; 1846 } 1847 1848 evsel->core.attr.exclude_user = eu; 1849 evsel->core.attr.exclude_kernel = ek; 1850 evsel->core.attr.exclude_hv = eh; 1851 evsel->core.attr.exclude_host = eH; 1852 evsel->core.attr.exclude_guest = eG; 1853 evsel->exclude_GH = exclude_GH; 1854 1855 /* Simple modifiers copied to the evsel. 
*/ 1856 if (mod.precise) { 1857 u8 precise = evsel->core.attr.precise_ip + mod.precise; 1858 /* 1859 * precise ip: 1860 * 1861 * 0 - SAMPLE_IP can have arbitrary skid 1862 * 1 - SAMPLE_IP must have constant skid 1863 * 2 - SAMPLE_IP requested to have 0 skid 1864 * 3 - SAMPLE_IP must have 0 skid 1865 * 1866 * See also PERF_RECORD_MISC_EXACT_IP 1867 */ 1868 if (precise > 3) { 1869 char *help; 1870 1871 if (asprintf(&help, 1872 "Maximum combined precise value is 3, adding precision to \"%s\"", 1873 evsel__name(evsel)) > 0) { 1874 parse_events_error__handle(parse_state->error, 1875 loc->first_column, 1876 help, NULL); 1877 } 1878 return -EINVAL; 1879 } 1880 evsel->core.attr.precise_ip = precise; 1881 } 1882 if (mod.precise_max) 1883 evsel->precise_max = 1; 1884 if (mod.non_idle) 1885 evsel->core.attr.exclude_idle = 1; 1886 if (mod.sample_read) 1887 evsel->sample_read = 1; 1888 if (mod.pinned && evsel__is_group_leader(evsel)) 1889 evsel->core.attr.pinned = 1; 1890 if (mod.exclusive && evsel__is_group_leader(evsel)) 1891 evsel->core.attr.exclusive = 1; 1892 if (mod.weak) 1893 evsel->weak_group = true; 1894 if (mod.bpf) 1895 evsel->bpf_counter = true; 1896 if (mod.retire_lat) 1897 evsel->retire_lat = true; 1898 if (mod.dont_regroup) 1899 evsel->dont_regroup = true; 1900 } 1901 return 0; 1902 } 1903 1904 int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc, 1905 struct list_head *list, 1906 struct parse_events_modifier mod) 1907 { 1908 return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true); 1909 } 1910 1911 int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc, 1912 struct list_head *list, 1913 struct parse_events_modifier mod) 1914 { 1915 return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false); 1916 } 1917 1918 int parse_events__set_default_name(struct list_head *list, char *name) 1919 { 1920 struct evsel *evsel; 1921 bool used_name = false; 1922 1923 __evlist__for_each_entry(list, evsel) { 1924 if (!evsel->name) { 1925 evsel->name = used_name ? strdup(name) : name; 1926 used_name = true; 1927 if (!evsel->name) 1928 return -ENOMEM; 1929 } 1930 } 1931 if (!used_name) 1932 free(name); 1933 return 0; 1934 } 1935 1936 static int parse_events__scanner(const char *str, 1937 struct parse_events_state *parse_state) 1938 { 1939 YY_BUFFER_STATE buffer; 1940 void *scanner; 1941 int ret; 1942 1943 ret = parse_events_lex_init_extra(parse_state, &scanner); 1944 if (ret) 1945 return ret; 1946 1947 buffer = parse_events__scan_string(str, scanner); 1948 1949 #ifdef PARSER_DEBUG 1950 parse_events_debug = 1; 1951 parse_events_set_debug(1, scanner); 1952 #endif 1953 ret = parse_events_parse(parse_state, scanner); 1954 1955 parse_events__flush_buffer(buffer, scanner); 1956 parse_events__delete_buffer(buffer, scanner); 1957 parse_events_lex_destroy(scanner); 1958 return ret; 1959 } 1960 1961 /* 1962 * parse event config string, return a list of event terms. 
1963 */ 1964 int parse_events_terms(struct parse_events_terms *terms, const char *str) 1965 { 1966 struct parse_events_state parse_state = { 1967 .terms = NULL, 1968 .stoken = PE_START_TERMS, 1969 }; 1970 int ret; 1971 1972 ret = parse_events__scanner(str, &parse_state); 1973 if (!ret) 1974 list_splice(&parse_state.terms->terms, &terms->terms); 1975 1976 zfree(&parse_state.terms); 1977 return ret; 1978 } 1979 1980 static int evsel__compute_group_pmu_name(struct evsel *evsel, 1981 const struct list_head *head) 1982 { 1983 struct evsel *leader = evsel__leader(evsel); 1984 struct evsel *pos; 1985 const char *group_pmu_name; 1986 struct perf_pmu *pmu = evsel__find_pmu(evsel); 1987 1988 if (!pmu) { 1989 /* 1990 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU 1991 * is a core PMU, but in heterogeneous systems this is 1992 * unknown. For now pick the first core PMU. 1993 */ 1994 pmu = perf_pmus__scan_core(NULL); 1995 } 1996 if (!pmu) { 1997 pr_debug("No PMU found for '%s'\n", evsel__name(evsel)); 1998 return -EINVAL; 1999 } 2000 group_pmu_name = pmu->name; 2001 /* 2002 * Software events may be in a group with other uncore PMU events. Use 2003 * the pmu_name of the first non-software event to avoid breaking the 2004 * software event out of the group. 2005 * 2006 * Aux event leaders, like intel_pt, expect a group with events from 2007 * other PMUs, so substitute the AUX event's PMU in this case. 2008 */ 2009 if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) { 2010 struct perf_pmu *leader_pmu = evsel__find_pmu(leader); 2011 2012 if (!leader_pmu) { 2013 /* As with determining pmu above. */ 2014 leader_pmu = perf_pmus__scan_core(NULL); 2015 } 2016 /* 2017 * Starting with the leader, find the first event with a named 2018 * non-software PMU. for_each_group_(member|evsel) isn't used as 2019 * the list isn't yet sorted putting evsel's in the same group 2020 * together. 2021 */ 2022 if (leader_pmu && !perf_pmu__is_software(leader_pmu)) { 2023 group_pmu_name = leader_pmu->name; 2024 } else if (leader->core.nr_members > 1) { 2025 list_for_each_entry(pos, head, core.node) { 2026 struct perf_pmu *pos_pmu; 2027 2028 if (pos == leader || evsel__leader(pos) != leader) 2029 continue; 2030 pos_pmu = evsel__find_pmu(pos); 2031 if (!pos_pmu) { 2032 /* As with determining pmu above. */ 2033 pos_pmu = perf_pmus__scan_core(NULL); 2034 } 2035 if (pos_pmu && !perf_pmu__is_software(pos_pmu)) { 2036 group_pmu_name = pos_pmu->name; 2037 break; 2038 } 2039 } 2040 } 2041 } 2042 /* Record computed name. */ 2043 evsel->group_pmu_name = strdup(group_pmu_name); 2044 return evsel->group_pmu_name ? 0 : -ENOMEM; 2045 } 2046 2047 __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs) 2048 { 2049 /* Order by insertion index. */ 2050 return lhs->core.idx - rhs->core.idx; 2051 } 2052 2053 static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r) 2054 { 2055 const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node); 2056 const struct evsel *lhs = container_of(lhs_core, struct evsel, core); 2057 const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node); 2058 const struct evsel *rhs = container_of(rhs_core, struct evsel, core); 2059 int *force_grouped_idx = _fg_idx; 2060 int lhs_sort_idx, rhs_sort_idx, ret; 2061 const char *lhs_pmu_name, *rhs_pmu_name; 2062 2063 /* 2064 * Get the indexes of the 2 events to sort. 
If the events are 2065 * in groups then the leader's index is used otherwise the 2066 * event's index is used. An index may be forced for events that 2067 * must be in the same group, namely Intel topdown events. 2068 */ 2069 if (lhs->dont_regroup) { 2070 lhs_sort_idx = lhs_core->idx; 2071 } else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) { 2072 lhs_sort_idx = *force_grouped_idx; 2073 } else { 2074 bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1; 2075 2076 lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx; 2077 } 2078 if (rhs->dont_regroup) { 2079 rhs_sort_idx = rhs_core->idx; 2080 } else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) { 2081 rhs_sort_idx = *force_grouped_idx; 2082 } else { 2083 bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1; 2084 2085 rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx; 2086 } 2087 2088 /* If the indices differ then respect the insertion order. */ 2089 if (lhs_sort_idx != rhs_sort_idx) 2090 return lhs_sort_idx - rhs_sort_idx; 2091 2092 /* 2093 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should 2094 * be in the same group. Events in the same group need to be ordered by 2095 * their grouping PMU name as the group will be broken to ensure only 2096 * events on the same PMU are programmed together. 2097 * 2098 * With forcing the lhs_sort_idx == rhs_sort_idx shows that one or both 2099 * events are being forced to be at force_group_index. If only one event 2100 * is being forced then the other event is the group leader of the group 2101 * we're trying to force the event into. Ensure for the force grouped 2102 * case that the PMU name ordering is also respected. 2103 */ 2104 lhs_pmu_name = lhs->group_pmu_name; 2105 rhs_pmu_name = rhs->group_pmu_name; 2106 ret = strcmp(lhs_pmu_name, rhs_pmu_name); 2107 if (ret) 2108 return ret; 2109 2110 /* 2111 * Architecture specific sorting, by default sort events in the same 2112 * group with the same PMU by their insertion index. On Intel topdown 2113 * constraints must be adhered to - slots first, etc. 2114 */ 2115 return arch_evlist__cmp(lhs, rhs); 2116 } 2117 2118 int __weak arch_evlist__add_required_events(struct list_head *list __always_unused) 2119 { 2120 return 0; 2121 } 2122 2123 static int parse_events__sort_events_and_fix_groups(struct list_head *list) 2124 { 2125 int idx = 0, force_grouped_idx = -1; 2126 struct evsel *pos, *cur_leader = NULL; 2127 struct perf_evsel *cur_leaders_grp = NULL; 2128 bool idx_changed = false; 2129 int orig_num_leaders = 0, num_leaders = 0; 2130 int ret; 2131 struct evsel *force_grouped_leader = NULL; 2132 bool last_event_was_forced_leader = false; 2133 2134 /* On x86 topdown metrics events require a slots event. */ 2135 ret = arch_evlist__add_required_events(list); 2136 if (ret) 2137 return ret; 2138 2139 /* 2140 * Compute index to insert ungrouped events at. Place them where the 2141 * first ungrouped event appears. 2142 */ 2143 list_for_each_entry(pos, list, core.node) { 2144 const struct evsel *pos_leader = evsel__leader(pos); 2145 2146 ret = evsel__compute_group_pmu_name(pos, list); 2147 if (ret) 2148 return ret; 2149 2150 if (pos == pos_leader) 2151 orig_num_leaders++; 2152 2153 /* 2154 * Ensure indexes are sequential, in particular for multiple 2155 * event lists being merged. The indexes are used to detect when 2156 * the user order is modified. 
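 * (The list_sort() below may permute the list; comparing this sequential
 * index against the post-sort position in the second pass is how any
 * reordering is detected.)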
2157 */ 2158 pos->core.idx = idx++; 2159 2160 /* 2161 * Remember an index to sort all forced grouped events 2162 * together to. Use the group leader as some events 2163 * must appear first within the group. 2164 */ 2165 if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos)) 2166 force_grouped_idx = pos_leader->core.idx; 2167 } 2168 2169 /* Sort events. */ 2170 list_sort(&force_grouped_idx, list, evlist__cmp); 2171 2172 /* 2173 * Recompute groups, splitting for PMUs and adding groups for events 2174 * that require them. 2175 */ 2176 idx = 0; 2177 list_for_each_entry(pos, list, core.node) { 2178 struct evsel *pos_leader = evsel__leader(pos); 2179 const char *pos_pmu_name = pos->group_pmu_name; 2180 const char *cur_leader_pmu_name; 2181 bool pos_force_grouped = force_grouped_idx != -1 && !pos->dont_regroup && 2182 arch_evsel__must_be_in_group(pos); 2183 2184 /* Reset index and nr_members. */ 2185 if (pos->core.idx != idx) 2186 idx_changed = true; 2187 pos->core.idx = idx++; 2188 pos->core.nr_members = 0; 2189 2190 /* 2191 * Set the group leader respecting the given groupings and that 2192 * groups can't span PMUs. 2193 */ 2194 if (!cur_leader || pos->dont_regroup) { 2195 cur_leader = pos->dont_regroup ? pos_leader : pos; 2196 cur_leaders_grp = &cur_leader->core; 2197 if (pos_force_grouped) 2198 force_grouped_leader = pos; 2199 } 2200 cur_leader_pmu_name = cur_leader->group_pmu_name; 2201 if (strcmp(cur_leader_pmu_name, pos_pmu_name)) { 2202 /* PMU changed so the group/leader must change. */ 2203 cur_leader = pos; 2204 cur_leaders_grp = pos->core.leader; 2205 if (pos_force_grouped && force_grouped_leader == NULL) 2206 force_grouped_leader = pos; 2207 } else if (cur_leaders_grp != pos->core.leader) { 2208 bool split_even_if_last_leader_was_forced = true; 2209 2210 /* 2211 * Event is for a different group. If the last event was 2212 * the forced group leader then subsequent group events 2213 * and forced events should be in the same group. If 2214 * there are no other forced group events then the 2215 * forced group leader wasn't really being forced into a 2216 * group, it just set arch_evsel__must_be_in_group, and 2217 * we don't want the group to split here. 2218 */ 2219 if (force_grouped_idx != -1 && last_event_was_forced_leader) { 2220 struct evsel *pos2 = pos; 2221 /* 2222 * Search the whole list as the group leaders 2223 * aren't currently valid. 2224 */ 2225 list_for_each_entry_continue(pos2, list, core.node) { 2226 if (pos->core.leader == pos2->core.leader && 2227 arch_evsel__must_be_in_group(pos2)) { 2228 split_even_if_last_leader_was_forced = false; 2229 break; 2230 } 2231 } 2232 } 2233 if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) { 2234 if (pos_force_grouped) { 2235 if (force_grouped_leader) { 2236 cur_leader = force_grouped_leader; 2237 cur_leaders_grp = force_grouped_leader->core.leader; 2238 } else { 2239 cur_leader = force_grouped_leader = pos; 2240 cur_leaders_grp = &pos->core; 2241 } 2242 } else { 2243 cur_leader = pos; 2244 cur_leaders_grp = pos->core.leader; 2245 } 2246 } 2247 } 2248 if (pos_leader != cur_leader) { 2249 /* The leader changed so update it. */ 2250 evsel__set_leader(pos, cur_leader); 2251 } 2252 last_event_was_forced_leader = (force_grouped_leader == pos); 2253 } 2254 list_for_each_entry(pos, list, core.node) { 2255 struct evsel *pos_leader = evsel__leader(pos); 2256 2257 if (pos == pos_leader) 2258 num_leaders++; 2259 pos_leader->core.nr_members++; 2260 } 2261 return (idx_changed || num_leaders != orig_num_leaders) ? 
1 : 0; 2262 } 2263 2264 int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter, 2265 struct parse_events_error *err, bool fake_pmu, 2266 bool warn_if_reordered, bool fake_tp) 2267 { 2268 struct parse_events_state parse_state = { 2269 .list = LIST_HEAD_INIT(parse_state.list), 2270 .idx = evlist->core.nr_entries, 2271 .error = err, 2272 .stoken = PE_START_EVENTS, 2273 .fake_pmu = fake_pmu, 2274 .fake_tp = fake_tp, 2275 .pmu_filter = pmu_filter, 2276 .match_legacy_cache_terms = true, 2277 }; 2278 int ret, ret2; 2279 2280 ret = parse_events__scanner(str, &parse_state); 2281 2282 if (!ret && list_empty(&parse_state.list)) { 2283 WARN_ONCE(true, "WARNING: event parser found nothing\n"); 2284 return -1; 2285 } 2286 2287 ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list); 2288 if (ret2 < 0) 2289 return ret; 2290 2291 /* 2292 * Add list to the evlist even with errors to allow callers to clean up. 2293 */ 2294 evlist__splice_list_tail(evlist, &parse_state.list); 2295 2296 if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) { 2297 evlist__uniquify_evsel_names(evlist, &stat_config); 2298 pr_warning("WARNING: events were regrouped to match PMUs\n"); 2299 2300 if (verbose > 0) { 2301 struct strbuf sb = STRBUF_INIT; 2302 2303 evlist__format_evsels(evlist, &sb, 2048); 2304 pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf); 2305 strbuf_release(&sb); 2306 } 2307 } 2308 if (!ret) { 2309 struct evsel *last; 2310 2311 last = evlist__last(evlist); 2312 last->cmdline_group_boundary = true; 2313 2314 return 0; 2315 } 2316 2317 /* 2318 * There are 2 users - builtin-record and builtin-test objects. 2319 * Both call evlist__delete in case of error, so we don't 2320 * need to bother. 2321 */ 2322 return ret; 2323 } 2324 2325 int parse_event(struct evlist *evlist, const char *str) 2326 { 2327 struct parse_events_error err; 2328 int ret; 2329 2330 parse_events_error__init(&err); 2331 ret = parse_events(evlist, str, &err); 2332 if (ret && verbose > 0) 2333 parse_events_error__print(&err, str); 2334 parse_events_error__exit(&err); 2335 return ret; 2336 } 2337 2338 struct parse_events_error_entry { 2339 /** @list: The list the error is part of.
*/ 2340 struct list_head list; 2341 /** @idx: index in the parsed string */ 2342 int idx; 2343 /** @str: string to display at the index */ 2344 char *str; 2345 /** @help: optional help string */ 2346 char *help; 2347 }; 2348 2349 void parse_events_error__init(struct parse_events_error *err) 2350 { 2351 INIT_LIST_HEAD(&err->list); 2352 } 2353 2354 void parse_events_error__exit(struct parse_events_error *err) 2355 { 2356 struct parse_events_error_entry *pos, *tmp; 2357 2358 list_for_each_entry_safe(pos, tmp, &err->list, list) { 2359 zfree(&pos->str); 2360 zfree(&pos->help); 2361 list_del_init(&pos->list); 2362 free(pos); 2363 } 2364 } 2365 2366 void parse_events_error__handle(struct parse_events_error *err, int idx, 2367 char *str, char *help) 2368 { 2369 struct parse_events_error_entry *entry; 2370 2371 if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n")) 2372 goto out_free; 2373 2374 entry = zalloc(sizeof(*entry)); 2375 if (!entry) { 2376 pr_err("Failed to allocate memory for event parsing error: %s (%s)\n", 2377 str, help ?: "<no help>"); 2378 goto out_free; 2379 } 2380 entry->idx = idx; 2381 entry->str = str; 2382 entry->help = help; 2383 list_add(&entry->list, &err->list); 2384 return; 2385 out_free: 2386 free(str); 2387 free(help); 2388 } 2389 2390 #define MAX_WIDTH 1000 2391 static int get_term_width(void) 2392 { 2393 struct winsize ws; 2394 2395 get_term_dimensions(&ws); 2396 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col; 2397 } 2398 2399 static void __parse_events_error__print(int err_idx, const char *err_str, 2400 const char *err_help, const char *event) 2401 { 2402 const char *str = "invalid or unsupported event: "; 2403 char _buf[MAX_WIDTH]; 2404 char *buf = (char *) event; 2405 int idx = 0; 2406 if (err_str) { 2407 /* -2 for extra '' in the final fprintf */ 2408 int width = get_term_width() - 2; 2409 int len_event = strlen(event); 2410 int len_str, max_len, cut = 0; 2411 2412 /* 2413 * Maximum error index indent, we will cut 2414 * the event string if it's bigger. 2415 */ 2416 int max_err_idx = 13; 2417 2418 /* 2419 * Let's be specific with the message when 2420 * we have the precise error. 2421 */ 2422 str = "event syntax error: "; 2423 len_str = strlen(str); 2424 max_len = width - len_str; 2425 2426 buf = _buf; 2427 2428 /* We're cutting from the beginning. */ 2429 if (err_idx > max_err_idx) 2430 cut = err_idx - max_err_idx; 2431 2432 strncpy(buf, event + cut, max_len); 2433 2434 /* Mark cut parts with '..' on both sides. */ 2435 if (cut) 2436 buf[0] = buf[1] = '.'; 2437 2438 if ((len_event - cut) > max_len) { 2439 buf[max_len - 1] = buf[max_len - 2] = '.'; 2440 buf[max_len] = 0; 2441 } 2442 2443 idx = len_str + err_idx - cut; 2444 } 2445 2446 fprintf(stderr, "%s'%s'\n", str, buf); 2447 if (idx) { 2448 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str); 2449 if (err_help) 2450 fprintf(stderr, "\n%s\n", err_help); 2451 } 2452 } 2453 2454 void parse_events_error__print(const struct parse_events_error *err, 2455 const char *event) 2456 { 2457 struct parse_events_error_entry *pos; 2458 bool first = true; 2459 2460 list_for_each_entry(pos, &err->list, list) { 2461 if (!first) 2462 fputs("\n", stderr); 2463 __parse_events_error__print(pos->idx, pos->str, pos->help, event); 2464 first = false; 2465 } 2466 } 2467 2468 /* 2469 * In the list of errors err, do any of the error strings (str) contain the 2470 * given needle string? 
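 * A substring match (strstr) is used, so the needle does not have to match
 * a whole error message.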
2471 */ 2472 bool parse_events_error__contains(const struct parse_events_error *err, 2473 const char *needle) 2474 { 2475 struct parse_events_error_entry *pos; 2476 2477 list_for_each_entry(pos, &err->list, list) { 2478 if (strstr(pos->str, needle) != NULL) 2479 return true; 2480 } 2481 return false; 2482 } 2483 2484 #undef MAX_WIDTH 2485 2486 int parse_events_option(const struct option *opt, const char *str, 2487 int unset __maybe_unused) 2488 { 2489 struct parse_events_option_args *args = opt->value; 2490 struct parse_events_error err; 2491 int ret; 2492 2493 parse_events_error__init(&err); 2494 ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err, 2495 /*fake_pmu=*/false, /*warn_if_reordered=*/true, 2496 /*fake_tp=*/false); 2497 2498 if (ret) { 2499 parse_events_error__print(&err, str); 2500 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 2501 } 2502 parse_events_error__exit(&err); 2503 2504 return ret; 2505 } 2506 2507 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset) 2508 { 2509 struct parse_events_option_args *args = opt->value; 2510 int ret; 2511 2512 if (*args->evlistp == NULL) { 2513 *args->evlistp = evlist__new(); 2514 2515 if (*args->evlistp == NULL) { 2516 fprintf(stderr, "Not enough memory to create evlist\n"); 2517 return -1; 2518 } 2519 } 2520 ret = parse_events_option(opt, str, unset); 2521 if (ret) { 2522 evlist__delete(*args->evlistp); 2523 *args->evlistp = NULL; 2524 } 2525 2526 return ret; 2527 } 2528 2529 static int 2530 foreach_evsel_in_last_glob(struct evlist *evlist, 2531 int (*func)(struct evsel *evsel, 2532 const void *arg), 2533 const void *arg) 2534 { 2535 struct evsel *last = NULL; 2536 int err; 2537 2538 /* 2539 * Don't return when list_empty, give func a chance to report 2540 * error when it found last == NULL. 2541 * 2542 * So no need to WARN here, let *func do this. 2543 */ 2544 if (evlist->core.nr_entries > 0) 2545 last = evlist__last(evlist); 2546 2547 do { 2548 err = (*func)(last, arg); 2549 if (err) 2550 return -1; 2551 if (!last) 2552 return 0; 2553 2554 if (last->core.node.prev == &evlist->core.entries) 2555 return 0; 2556 last = list_entry(last->core.node.prev, struct evsel, core.node); 2557 } while (!last->cmdline_group_boundary); 2558 2559 return 0; 2560 } 2561 2562 /* Will a tracepoint filter work for str or should a BPF filter be used? 
*/ 2563 static bool is_possible_tp_filter(const char *str) 2564 { 2565 return strstr(str, "uid") == NULL; 2566 } 2567 2568 static int set_filter(struct evsel *evsel, const void *arg) 2569 { 2570 const char *str = arg; 2571 int nr_addr_filters = 0; 2572 struct perf_pmu *pmu; 2573 2574 if (evsel == NULL) { 2575 fprintf(stderr, 2576 "--filter option should follow a -e tracepoint or HW tracer option\n"); 2577 return -1; 2578 } 2579 2580 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) { 2581 if (evsel__append_tp_filter(evsel, str) < 0) { 2582 fprintf(stderr, 2583 "not enough memory to hold filter string\n"); 2584 return -1; 2585 } 2586 2587 return 0; 2588 } 2589 2590 pmu = evsel__find_pmu(evsel); 2591 if (pmu) { 2592 perf_pmu__scan_file(pmu, "nr_addr_filters", 2593 "%d", &nr_addr_filters); 2594 } 2595 if (!nr_addr_filters) 2596 return perf_bpf_filter__parse(&evsel->bpf_filters, str); 2597 2598 if (evsel__append_addr_filter(evsel, str) < 0) { 2599 fprintf(stderr, 2600 "not enough memory to hold filter string\n"); 2601 return -1; 2602 } 2603 2604 return 0; 2605 } 2606 2607 int parse_filter(const struct option *opt, const char *str, 2608 int unset __maybe_unused) 2609 { 2610 struct evlist *evlist = *(struct evlist **)opt->value; 2611 2612 return foreach_evsel_in_last_glob(evlist, set_filter, 2613 (const void *)str); 2614 } 2615 2616 int parse_uid_filter(struct evlist *evlist, uid_t uid) 2617 { 2618 struct option opt = { 2619 .value = &evlist, 2620 }; 2621 char buf[128]; 2622 int ret; 2623 2624 snprintf(buf, sizeof(buf), "uid == %d", uid); 2625 ret = parse_filter(&opt, buf, /*unset=*/0); 2626 if (ret) { 2627 if (use_browser >= 1) { 2628 /* 2629 * Use ui__warning so a pop up appears above the 2630 * underlying BPF error message. 
2631 */ 2632 ui__warning("Failed to add UID filtering that uses BPF filtering.\n"); 2633 } else { 2634 fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n"); 2635 } 2636 } 2637 return ret; 2638 } 2639 2640 static int add_exclude_perf_filter(struct evsel *evsel, 2641 const void *arg __maybe_unused) 2642 { 2643 char new_filter[64]; 2644 2645 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2646 fprintf(stderr, 2647 "--exclude-perf option should follow a -e tracepoint option\n"); 2648 return -1; 2649 } 2650 2651 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); 2652 2653 if (evsel__append_tp_filter(evsel, new_filter) < 0) { 2654 fprintf(stderr, 2655 "not enough memory to hold filter string\n"); 2656 return -1; 2657 } 2658 2659 return 0; 2660 } 2661 2662 int exclude_perf(const struct option *opt, 2663 const char *arg __maybe_unused, 2664 int unset __maybe_unused) 2665 { 2666 struct evlist *evlist = *(struct evlist **)opt->value; 2667 2668 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, 2669 NULL); 2670 } 2671 2672 int parse_events__is_hardcoded_term(struct parse_events_term *term) 2673 { 2674 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 2675 } 2676 2677 static int new_term(struct parse_events_term **_term, 2678 struct parse_events_term *temp, 2679 char *str, u64 num) 2680 { 2681 struct parse_events_term *term; 2682 2683 term = malloc(sizeof(*term)); 2684 if (!term) 2685 return -ENOMEM; 2686 2687 *term = *temp; 2688 INIT_LIST_HEAD(&term->list); 2689 term->weak = false; 2690 2691 switch (term->type_val) { 2692 case PARSE_EVENTS__TERM_TYPE_NUM: 2693 term->val.num = num; 2694 break; 2695 case PARSE_EVENTS__TERM_TYPE_STR: 2696 term->val.str = str; 2697 break; 2698 default: 2699 free(term); 2700 return -EINVAL; 2701 } 2702 2703 *_term = term; 2704 return 0; 2705 } 2706 2707 int parse_events_term__num(struct parse_events_term **term, 2708 enum parse_events__term_type type_term, 2709 const char *config, u64 num, 2710 bool no_value, 2711 void *loc_term_, void *loc_val_) 2712 { 2713 YYLTYPE *loc_term = loc_term_; 2714 YYLTYPE *loc_val = loc_val_; 2715 2716 struct parse_events_term temp = { 2717 .type_val = PARSE_EVENTS__TERM_TYPE_NUM, 2718 .type_term = type_term, 2719 .config = config ? : strdup(parse_events__term_type_str(type_term)), 2720 .no_value = no_value, 2721 .err_term = loc_term ? loc_term->first_column : 0, 2722 .err_val = loc_val ? loc_val->first_column : 0, 2723 }; 2724 2725 return new_term(term, &temp, /*str=*/NULL, num); 2726 } 2727 2728 int parse_events_term__str(struct parse_events_term **term, 2729 enum parse_events__term_type type_term, 2730 char *config, char *str, 2731 void *loc_term_, void *loc_val_) 2732 { 2733 YYLTYPE *loc_term = loc_term_; 2734 YYLTYPE *loc_val = loc_val_; 2735 2736 struct parse_events_term temp = { 2737 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2738 .type_term = type_term, 2739 .config = config, 2740 .err_term = loc_term ? loc_term->first_column : 0, 2741 .err_val = loc_val ? 
loc_val->first_column : 0, 2742 }; 2743 2744 return new_term(term, &temp, str, /*num=*/0); 2745 } 2746 2747 int parse_events_term__term(struct parse_events_term **term, 2748 enum parse_events__term_type term_lhs, 2749 enum parse_events__term_type term_rhs, 2750 void *loc_term, void *loc_val) 2751 { 2752 return parse_events_term__str(term, term_lhs, NULL, 2753 strdup(parse_events__term_type_str(term_rhs)), 2754 loc_term, loc_val); 2755 } 2756 2757 int parse_events_term__clone(struct parse_events_term **new, 2758 const struct parse_events_term *term) 2759 { 2760 char *str; 2761 struct parse_events_term temp = *term; 2762 2763 temp.used = false; 2764 if (term->config) { 2765 temp.config = strdup(term->config); 2766 if (!temp.config) 2767 return -ENOMEM; 2768 } 2769 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) 2770 return new_term(new, &temp, /*str=*/NULL, term->val.num); 2771 2772 str = strdup(term->val.str); 2773 if (!str) { 2774 zfree(&temp.config); 2775 return -ENOMEM; 2776 } 2777 return new_term(new, &temp, str, /*num=*/0); 2778 } 2779 2780 void parse_events_term__delete(struct parse_events_term *term) 2781 { 2782 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) 2783 zfree(&term->val.str); 2784 2785 zfree(&term->config); 2786 free(term); 2787 } 2788 2789 static int parse_events_terms__copy(const struct parse_events_terms *src, 2790 struct parse_events_terms *dest) 2791 { 2792 struct parse_events_term *term; 2793 2794 list_for_each_entry (term, &src->terms, list) { 2795 struct parse_events_term *n; 2796 int ret; 2797 2798 ret = parse_events_term__clone(&n, term); 2799 if (ret) 2800 return ret; 2801 2802 list_add_tail(&n->list, &dest->terms); 2803 } 2804 return 0; 2805 } 2806 2807 void parse_events_terms__init(struct parse_events_terms *terms) 2808 { 2809 INIT_LIST_HEAD(&terms->terms); 2810 } 2811 2812 void parse_events_terms__exit(struct parse_events_terms *terms) 2813 { 2814 struct parse_events_term *term, *h; 2815 2816 list_for_each_entry_safe(term, h, &terms->terms, list) { 2817 list_del_init(&term->list); 2818 parse_events_term__delete(term); 2819 } 2820 } 2821 2822 void parse_events_terms__delete(struct parse_events_terms *terms) 2823 { 2824 if (!terms) 2825 return; 2826 parse_events_terms__exit(terms); 2827 free(terms); 2828 } 2829 2830 static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb) 2831 { 2832 struct parse_events_term *term; 2833 bool first = true; 2834 2835 if (!terms) 2836 return 0; 2837 2838 list_for_each_entry(term, &terms->terms, list) { 2839 int ret; 2840 2841 if (!first) { 2842 ret = strbuf_addch(sb, ','); 2843 if (ret < 0) 2844 return ret; 2845 } 2846 first = false; 2847 2848 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) 2849 if (term->no_value) { 2850 assert(term->val.num == 1); 2851 ret = strbuf_addf(sb, "%s", term->config); 2852 } else 2853 ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num); 2854 else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) { 2855 if (term->config) { 2856 ret = strbuf_addf(sb, "%s=", term->config); 2857 if (ret < 0) 2858 return ret; 2859 } else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) { 2860 ret = strbuf_addf(sb, "%s=", 2861 parse_events__term_type_str(term->type_term)); 2862 if (ret < 0) 2863 return ret; 2864 } 2865 assert(!term->no_value); 2866 ret = strbuf_addf(sb, "%s", term->val.str); 2867 } 2868 if (ret < 0) 2869 return ret; 2870 } 2871 return 0; 2872 } 2873 2874 static void config_terms_list(char *buf, size_t buf_sz) 2875 { 2876 int 
i; 2877 bool first = true; 2878 2879 buf[0] = '\0'; 2880 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) { 2881 const char *name = parse_events__term_type_str(i); 2882 2883 if (!config_term_avail(i, NULL)) 2884 continue; 2885 if (!name) 2886 continue; 2887 if (name[0] == '<') 2888 continue; 2889 2890 if (strlen(buf) + strlen(name) + 2 >= buf_sz) 2891 return; 2892 2893 if (!first) 2894 strcat(buf, ","); 2895 else 2896 first = false; 2897 strcat(buf, name); 2898 } 2899 } 2900 2901 /* 2902 * Return a string containing the valid config terms of an event. 2903 * @additional_terms: For terms such as PMU sysfs terms. 2904 */ 2905 char *parse_events_formats_error_string(char *additional_terms) 2906 { 2907 char *str; 2908 /* "no-overwrite" is the longest name */ 2909 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR * 2910 (sizeof("no-overwrite") - 1)]; 2911 2912 config_terms_list(static_terms, sizeof(static_terms)); 2913 /* valid terms */ 2914 if (additional_terms) { 2915 if (asprintf(&str, "valid terms: %s,%s", 2916 additional_terms, static_terms) < 0) 2917 goto fail; 2918 } else { 2919 if (asprintf(&str, "valid terms: %s", static_terms) < 0) 2920 goto fail; 2921 } 2922 return str; 2923 2924 fail: 2925 return NULL; 2926 } 2927
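
/*
 * Minimal usage sketch (illustrative only, mirroring parse_event() above):
 * the error-handling flow around parsing. Assumes an already-created evlist.
 *
 *	struct parse_events_error err;
 *
 *	parse_events_error__init(&err);
 *	if (parse_events(evlist, "cycles:u,instructions", &err))
 *		parse_events_error__print(&err, "cycles:u,instructions");
 *	parse_events_error__exit(&err);
 */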