// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <api/io_dir.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "ui/ui.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
#include "tracepoint.h"

#define MAX_NAME_LEN 100

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest);

const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias  = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias  = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias  = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias  = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias  = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias  = "",
	},
};

const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias  = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias  = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias  = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias  = "",
	},
	[PERF_COUNT_SW_BPF_OUTPUT] = {
		.symbol = "bpf-output",
		.alias  = "",
	},
	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
		.symbol = "cgroup-switches",
		.alias  = "",
	},
};

const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";

	case PERF_TYPE_SOFTWARE:
		return "software";

	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";

	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";

	default:
		break;
	}

	return "unknown";
}

static char *get_config_str(const struct parse_events_terms *head_terms,
			    enum parse_events__term_type type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms)
{
	struct parse_events_term *term;
	struct perf_cpu_map *cpus = NULL;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list) {
		if (term->type_term == PARSE_EVENTS__TERM_TYPE_CPU) {
			struct perf_cpu_map *cpu = perf_cpu_map__new_int(term->val.num);

			perf_cpu_map__merge(&cpus, cpu);
			perf_cpu_map__put(cpu);
		}
	}

	return cpus;
}

/**
 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
 *           matches the raw's string value. If the string value matches an
 *           event then change the term to be an event, if not then change it
 *           to be a config term. For example, "read" may be an event of the
 *           PMU or a raw hex encoding of 0xead. The fix-up is done late so the
 *           PMU of the event can be determined and we don't need to scan all
 *           PMUs ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &config_terms->terms, list) {
		u64 num;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
			continue;

		if (perf_pmu__have_event(pmu, term->val.str)) {
			zfree(&term->config);
			term->config = term->val.str;
			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->val.num = 1;
			term->no_value = true;
			continue;
		}

		zfree(&term->config);
		term->config = strdup("config");
		errno = 0;
		num = strtoull(term->val.str + 1, NULL, 16);
		assert(errno == 0);
		free(term->val.str);
		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
		term->val.num = num;
		term->no_value = false;
	}
}

static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, struct evsel *first_wildcard_match,
	    struct perf_cpu_map *cpu_list, u64 alternate_hw_config)
{
	struct evsel *evsel;
	bool is_pmu_core;
	struct perf_cpu_map *cpus;

	/*
	 * Ensure the first_wildcard_match's PMU matches that of the new event
	 * being added. Otherwise try to match with another event further down
	 * the evlist.
	 */
	if (first_wildcard_match) {
		struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);

		first_wildcard_match = NULL;
		list_for_each_entry_continue(pos, list, core.node) {
			if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
				first_wildcard_match = pos;
				break;
			}
			if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
				first_wildcard_match = pos;
				break;
			}
		}
	}

	if (pmu) {
		is_pmu_core = pmu->is_core;
		cpus = perf_cpu_map__get(perf_cpu_map__is_empty(cpu_list) ? pmu->cpus : cpu_list);
		perf_pmu__warn_invalid_formats(pmu);
		if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
			perf_pmu__warn_invalid_config(pmu, attr->config, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
			perf_pmu__warn_invalid_config(pmu, attr->config1, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
			perf_pmu__warn_invalid_config(pmu, attr->config2, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
			perf_pmu__warn_invalid_config(pmu, attr->config3, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
		}
	} else {
		is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
			       attr->type == PERF_TYPE_HW_CACHE);
		if (perf_cpu_map__is_empty(cpu_list))
			cpus = is_pmu_core ? perf_cpu_map__new_online_cpus() : NULL;
		else
			cpus = perf_cpu_map__get(cpu_list);
	}
	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel) {
		perf_cpu_map__put(cpus);
		return NULL;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.own_cpus = perf_cpu_map__get(cpus);
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->core.is_pmu_core = is_pmu_core;
	evsel->pmu = pmu;
	evsel->alternate_hw_config = alternate_hw_config;
	evsel->first_wildcard_match = first_wildcard_match;

	if (name)
		evsel->name = strdup(name);

	if (metric_id)
		evsel->metric_id = strdup(metric_id);

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	return evsel;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms,
		     u64 alternate_hw_config)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   alternate_hw_config) ? 0 : -ENOMEM;
}

/**
 * parse_aliases - search names for entries beginning or equalling str ignoring
 *                 case. If multiple entries in names match str then the longest
 *                 is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}
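
/*
 * Worked example of the decoding above (illustrative): the name
 * "L1-dcache-load-misses" splits into the cache type "L1-dcache", the op
 * "load" (PERF_COUNT_HW_CACHE_OP_READ) and the result "misses"
 * (PERF_COUNT_HW_CACHE_RESULT_MISS), giving:
 *
 *   config = PERF_COUNT_HW_CACHE_L1D |
 *            (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *            (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * A bare "L1-dcache" takes the read/access fall-backs above instead.
 */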

/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match, u64 alternate_hw_config);

int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
			   struct parse_events_state *parse_state,
			   struct parse_events_terms *parsed_terms)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;
	const char *config_name = get_config_name(parsed_terms);
	const char *metric_id = get_config_metric_id(parsed_terms);
	struct perf_cpu_map *cpus = get_config_cpu(parsed_terms);
	int ret = 0;
	struct evsel *first_wildcard_match = NULL;

	while ((pmu = perf_pmus__scan_for_event(pmu, name)) != NULL) {
		LIST_HEAD(config_terms);
		struct perf_event_attr attr;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (perf_pmu__have_event(pmu, name)) {
			/*
			 * The PMU has the event so add as not a legacy cache
			 * event.
			 */
			ret = parse_events_add_pmu(parse_state, list, pmu,
						   parsed_terms,
						   first_wildcard_match,
						   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
			if (ret)
				goto out_err;
			if (first_wildcard_match == NULL)
				first_wildcard_match =
					container_of(list->prev, struct evsel, core.node);
			continue;
		}

		if (!pmu->is_core) {
			/* Legacy cache events are only supported by core PMUs. */
			continue;
		}

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HW_CACHE;

		ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
		if (ret)
			return ret;

		found_supported = true;

		if (parsed_terms) {
			if (config_attr(&attr, parsed_terms, parse_state->error,
					config_term_common)) {
				ret = -EINVAL;
				goto out_err;
			}
			if (get_config_terms(parsed_terms, &config_terms)) {
				ret = -ENOMEM;
				goto out_err;
			}
		}

		if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
				metric_id, pmu, &config_terms, first_wildcard_match,
				cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
			ret = -ENOMEM;

		if (first_wildcard_match == NULL)
			first_wildcard_match = container_of(list->prev, struct evsel, core.node);
		free_config_terms(&config_terms);
		if (ret)
			goto out_err;
	}
out_err:
	perf_cpu_map__put(cpus);
	return found_supported ? 0 : -EINVAL;
}

static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name, int column)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get the error directly from the syscall errno (> 0), or from an
	 * encoded pointer's error (< 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, column, strdup(str), strdup(help));
}

static int add_tracepoint(struct parse_events_state *parse_state,
			  struct list_head *list,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
					       !parse_state->fake_tp);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}
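
/*
 * For example (illustrative), "sched:sched_switch" is added directly by
 * add_tracepoint(), while "sched:sched_*" contains a glob character and is
 * expanded below by matching each entry of the tracing events directory
 * with strglobmatch().
 */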

static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
				      struct list_head *list,
				      const char *sys_name, const char *evt_name,
				      struct parse_events_error *err,
				      struct parse_events_terms *head_config, YYLTYPE *loc)
{
	char *evt_path;
	struct io_dirent64 *evt_ent;
	struct io_dir evt_dir;
	int ret = 0, found = 0;

	evt_path = get_events_file(sys_name);
	if (!evt_path) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}
	io_dir__init(&evt_dir, open(evt_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	if (evt_dir.dirfd < 0) {
		put_events_file(evt_path);
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (evt_ent = io_dir__readdir(&evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		found++;

		ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
				     err, head_config, loc);
	}

	if (!found) {
		tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
		ret = -1;
	}

	put_events_file(evt_path);
	close(evt_dir.dirfd);
	return ret;
}

static int add_tracepoint_event(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys_name, const char *evt_name,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, YYLTYPE *loc)
{
	return strpbrk(evt_name, "*?") ?
		add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
					   err, head_config, loc) :
		add_tracepoint(parse_state, list, sys_name, evt_name,
			       err, head_config, loc);
}

static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
				    struct list_head *list,
				    const char *sys_name, const char *evt_name,
				    struct parse_events_error *err,
				    struct parse_events_terms *head_config, YYLTYPE *loc)
{
	struct io_dirent64 *events_ent;
	struct io_dir events_dir;
	int ret = 0;
	char *events_dir_path = get_tracing_file("events");

	if (!events_dir_path) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}
	io_dir__init(&events_dir, open(events_dir_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	put_events_file(events_dir_path);
	if (events_dir.dirfd < 0) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (events_ent = io_dir__readdir(&events_dir))) {
		if (!strcmp(events_ent->d_name, ".")
		    || !strcmp(events_ent->d_name, "..")
		    || !strcmp(events_ent->d_name, "enable")
		    || !strcmp(events_ent->d_name, "header_event")
		    || !strcmp(events_ent->d_name, "header_page"))
			continue;

		if (!strglobmatch(events_ent->d_name, sys_name))
			continue;

		ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
					   evt_name, err, head_config, loc);
	}
	close(events_dir.dirfd);
	return ret;
}

size_t default_breakpoint_len(void)
{
#if defined(__i386__)
	static int len;

	if (len == 0) {
		struct perf_env env = {};

		perf_env__init(&env);
		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
		perf_env__exit(&env);
	}
	return len;
#elif defined(__aarch64__)
	return 4;
#else
	return sizeof(long);
#endif
}

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}
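
/*
 * Illustrative example: a breakpoint spec such as "mem:0x1000:rw" ends up
 * here with the type string "rw", setting bp_type to
 * HW_BREAKPOINT_R | HW_BREAKPOINT_W. Repeating a character ("rr") fails
 * with -EINVAL, and an empty type string defaults to read|write.
 */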

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
				struct list_head *list,
				u64 addr, char *type, u64 len,
				struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = default_breakpoint_len();
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);

	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
			 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  enum parse_events__term_val_type type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					   type == PARSE_EVENTS__TERM_TYPE_NUM
					   ? strdup("expected numeric value")
					   : strdup("expected string value"),
					   NULL);
	}
	return -EINVAL;
}

static bool config_term_shrinked;

const char *parse_events__term_type_str(enum parse_events__term_type term_type)
{
	/*
	 * Update according to parse-events.l
	 */
	static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
		[PARSE_EVENTS__TERM_TYPE_USER]			= "<sysfs term>",
		[PARSE_EVENTS__TERM_TYPE_CONFIG]		= "config",
		[PARSE_EVENTS__TERM_TYPE_CONFIG1]		= "config1",
		[PARSE_EVENTS__TERM_TYPE_CONFIG2]		= "config2",
		[PARSE_EVENTS__TERM_TYPE_CONFIG3]		= "config3",
		[PARSE_EVENTS__TERM_TYPE_NAME]			= "name",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]		= "period",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]		= "freq",
		[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]	= "branch_type",
		[PARSE_EVENTS__TERM_TYPE_TIME]			= "time",
		[PARSE_EVENTS__TERM_TYPE_CALLGRAPH]		= "call-graph",
		[PARSE_EVENTS__TERM_TYPE_STACKSIZE]		= "stack-size",
		[PARSE_EVENTS__TERM_TYPE_NOINHERIT]		= "no-inherit",
		[PARSE_EVENTS__TERM_TYPE_INHERIT]		= "inherit",
		[PARSE_EVENTS__TERM_TYPE_MAX_STACK]		= "max-stack",
		[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]		= "nr",
		[PARSE_EVENTS__TERM_TYPE_OVERWRITE]		= "overwrite",
		[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]		= "no-overwrite",
		[PARSE_EVENTS__TERM_TYPE_DRV_CFG]		= "driver-config",
		[PARSE_EVENTS__TERM_TYPE_PERCORE]		= "percore",
		[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]		= "aux-output",
		[PARSE_EVENTS__TERM_TYPE_AUX_ACTION]		= "aux-action",
		[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]	= "aux-sample-size",
		[PARSE_EVENTS__TERM_TYPE_METRIC_ID]		= "metric-id",
		[PARSE_EVENTS__TERM_TYPE_RAW]			= "raw",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE]		= "legacy-cache",
		[PARSE_EVENTS__TERM_TYPE_HARDWARE]		= "hardware",
		[PARSE_EVENTS__TERM_TYPE_CPU]			= "cpu",
	};
	if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
		return "unknown term";

	return config_term_names[term_type];
}

static bool
config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					   strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
		return true;
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     parse_events__term_type_str(term_type)) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)						\
do {									\
	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
		return -EINVAL;						\
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(err, term->err_val,
						   strdup("invalid branch sample type"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_RAW:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(err, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CPU:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
			parse_events_error__handle(err, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		parse_events_error__handle(err, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after basic checking so
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If check availability at the entry of this function,
	 * user will see "'<sysfs term>' is not usable in 'perf stat'"
	 * if an invalid config term is provided for legacy events
	 * (for example, instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, err))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_error *err)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * Rewrite the PMU event to a legacy cache one unless the PMU
		 * doesn't support legacy cache events or the event is present
		 * within the PMU.
		 */
		if (perf_pmu__supports_legacy_cache(pmu) &&
		    !perf_pmu__have_event(pmu, term->config)) {
			attr->type = PERF_TYPE_HW_CACHE;
			return parse_events__decode_legacy_cache(term->config, pmu->type,
								 &attr->config);
		} else {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
		}
	}
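	/*
	 * Illustrative example: a LEGACY_CACHE term such as
	 * "cpu/L1-dcache-load-misses/" is rewritten above to a
	 * PERF_TYPE_HW_CACHE attr, unless the PMU itself defines an event of
	 * that name, in which case the term is kept as a user term and
	 * resolved through the usual sysfs/JSON path.
	 */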
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * If the PMU has a sysfs or json event prefer it over
		 * legacy. ARM requires this.
		 */
		if (perf_pmu__have_event(pmu, term->config)) {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
			term->alternate_hw_config = true;
		} else {
			attr->type = PERF_TYPE_HARDWARE;
			attr->config = term->val.num;
			if (perf_pmus__supports_extended_type())
				attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
		}
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, err);
}

static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
						   strdup(parse_events__term_type_str(term->type_term)),
						   strdup("valid terms: call-graph,stack-size\n"));
		}
		return -EINVAL;
	}

	return 0;
}

static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &head->terms, list)
		if (config_term(attr, term, err))
			return -EINVAL;

	return 0;
}

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
			ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_USER:
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}
	return 0;
}
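
/*
 * Illustrative example for get_config_chgs() below: with a sysfs format
 * "event" defined as "config:0-7", the user term "event=0x3" marks bits
 * 0xff of attr->config as user changed, while an explicit "config=N" term
 * marks all bits.
 */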

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;

	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(parse_state, list, sys, event,
						err, head_config, loc);
	else
		return add_tracepoint_event(parse_state, list, sys, event,
					    err, head_config, loc);
}

static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				      struct list_head *list,
				      struct perf_pmu *pmu, u32 type, u32 extended_type,
				      u64 config, const struct parse_events_terms *head_config,
				      struct evsel *first_wildcard_match)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	struct perf_cpu_map *cpus;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	cpus = get_config_cpu(head_config);
	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
			  metric_id, pmu, &config_terms, first_wildcard_match,
			  cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
	perf_cpu_map__put(cpus);
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     const struct parse_events_terms *head_config,
			     bool wildcard)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;

	/* Wildcards on numeric values are only supported by core PMUs. */
	if (wildcard && perf_pmus__supports_extended_type()) {
		struct evsel *first_wildcard_match = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			int ret;

			found_supported = true;
			if (parse_events__filter_pmu(parse_state, pmu))
				continue;

			ret = __parse_events_add_numeric(parse_state, list, pmu,
							 type, pmu->type,
							 config, head_config,
							 first_wildcard_match);
			if (ret)
				return ret;
			if (first_wildcard_match == NULL)
				first_wildcard_match =
					container_of(list->prev, struct evsel, core.node);
		}
		if (found_supported)
			return 0;
	}
	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
					  type, /*extended_type=*/0, config, head_config,
					  /*first_wildcard_match=*/NULL);
}
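
/*
 * For example (illustrative), on an Intel hybrid system with cpu_core and
 * cpu_atom PMUs, a wildcard legacy event like "cycles" is added once per
 * core PMU above, each PMU's type being encoded in the extended type bits
 * of attr.config.
 */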

static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match, u64 alternate_hw_config)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);
	struct parse_events_terms parsed_terms;
	bool alias_rewrote_terms = false;
	struct perf_cpu_map *term_cpu = NULL;

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		if (pmu->selectable && const_parsed_terms &&
		    list_empty(&const_parsed_terms->terms)) {
			strbuf_addf(&sb, "%s//", pmu->name);
		} else {
			strbuf_addf(&sb, "%s/", pmu->name);
			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
			strbuf_addch(&sb, '/');
		}
		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
		strbuf_release(&sb);
	}

	memset(&attr, 0, sizeof(attr));
	if (pmu->perf_event_attr_init_default)
		pmu->perf_event_attr_init_default(pmu, &attr);

	attr.type = pmu->type;

	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, first_wildcard_match,
				    /*cpu_list=*/NULL, alternate_hw_config);
		return evsel ? 0 : -ENOMEM;
	}

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}
	fix_raw(&parsed_terms, pmu);

	/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
	if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	/* Look for event names in the terms and rewrite into format based terms. */
	if (perf_pmu__check_alias(pmu, &parsed_terms,
				  &info, &alias_rewrote_terms,
				  &alternate_hw_config, err)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		parse_events_terms__to_strbuf(&parsed_terms, &sb);
		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
		strbuf_release(&sb);
	}

	/* Configure attr/terms again if an alias was expanded. */
	if (alias_rewrote_terms &&
	    config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (get_config_terms(&parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->perf_event_attr_init_default &&
	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}
*/ 1604 if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false, 1605 parse_state->error)) { 1606 free_config_terms(&config_terms); 1607 parse_events_terms__exit(&parsed_terms); 1608 return -EINVAL; 1609 } 1610 1611 term_cpu = get_config_cpu(&parsed_terms); 1612 evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true, 1613 get_config_name(&parsed_terms), 1614 get_config_metric_id(&parsed_terms), pmu, 1615 &config_terms, first_wildcard_match, term_cpu, alternate_hw_config); 1616 perf_cpu_map__put(term_cpu); 1617 if (!evsel) { 1618 parse_events_terms__exit(&parsed_terms); 1619 return -ENOMEM; 1620 } 1621 1622 if (evsel->name) 1623 evsel->use_config_name = true; 1624 1625 evsel->percore = config_term_percore(&evsel->config_terms); 1626 1627 parse_events_terms__exit(&parsed_terms); 1628 free((char *)evsel->unit); 1629 evsel->unit = strdup(info.unit); 1630 evsel->scale = info.scale; 1631 evsel->per_pkg = info.per_pkg; 1632 evsel->snapshot = info.snapshot; 1633 evsel->retirement_latency.mean = info.retirement_latency_mean; 1634 evsel->retirement_latency.min = info.retirement_latency_min; 1635 evsel->retirement_latency.max = info.retirement_latency_max; 1636 1637 return 0; 1638 } 1639 1640 int parse_events_multi_pmu_add(struct parse_events_state *parse_state, 1641 const char *event_name, u64 hw_config, 1642 const struct parse_events_terms *const_parsed_terms, 1643 struct list_head **listp, void *loc_) 1644 { 1645 struct parse_events_term *term; 1646 struct list_head *list = NULL; 1647 struct perf_pmu *pmu = NULL; 1648 YYLTYPE *loc = loc_; 1649 int ok = 0; 1650 const char *config; 1651 struct parse_events_terms parsed_terms; 1652 struct evsel *first_wildcard_match = NULL; 1653 1654 *listp = NULL; 1655 1656 parse_events_terms__init(&parsed_terms); 1657 if (const_parsed_terms) { 1658 int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms); 1659 1660 if (ret) 1661 return ret; 1662 } 1663 1664 config = strdup(event_name); 1665 if (!config) 1666 goto out_err; 1667 1668 if (parse_events_term__num(&term, 1669 PARSE_EVENTS__TERM_TYPE_USER, 1670 config, /*num=*/1, /*novalue=*/true, 1671 loc, /*loc_val=*/NULL) < 0) { 1672 zfree(&config); 1673 goto out_err; 1674 } 1675 list_add_tail(&term->list, &parsed_terms.terms); 1676 1677 /* Add it for all PMUs that support the alias */ 1678 list = malloc(sizeof(struct list_head)); 1679 if (!list) 1680 goto out_err; 1681 1682 INIT_LIST_HEAD(list); 1683 1684 while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) { 1685 1686 if (parse_events__filter_pmu(parse_state, pmu)) 1687 continue; 1688 1689 if (!perf_pmu__have_event(pmu, event_name)) 1690 continue; 1691 1692 if (!parse_events_add_pmu(parse_state, list, pmu, 1693 &parsed_terms, first_wildcard_match, hw_config)) { 1694 struct strbuf sb; 1695 1696 strbuf_init(&sb, /*hint=*/ 0); 1697 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1698 pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf); 1699 strbuf_release(&sb); 1700 ok++; 1701 } 1702 if (first_wildcard_match == NULL) 1703 first_wildcard_match = container_of(list->prev, struct evsel, core.node); 1704 } 1705 1706 if (parse_state->fake_pmu) { 1707 if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms, 1708 first_wildcard_match, hw_config)) { 1709 struct strbuf sb; 1710 1711 strbuf_init(&sb, /*hint=*/ 0); 1712 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1713 pr_debug("%s -> fake/%s/\n", event_name, sb.buf); 1714 strbuf_release(&sb); 1715 ok++; 1716 } 1717 } 1718 1719 

out_err:
	parse_events_terms__exit(&parsed_terms);
	if (ok)
		*listp = list;
	else
		free(list);

	return ok ? 0 : -1;
}

int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
					  const char *event_or_pmu,
					  const struct parse_events_terms *const_parsed_terms,
					  struct list_head **listp,
					  void *loc_)
{
	YYLTYPE *loc = loc_;
	struct perf_pmu *pmu;
	int ok = 0;
	char *help;
	struct evsel *first_wildcard_match = NULL;

	*listp = malloc(sizeof(**listp));
	if (!*listp)
		return -ENOMEM;

	INIT_LIST_HEAD(*listp);

	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
	pmu = perf_pmus__find(event_or_pmu);
	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
					 first_wildcard_match,
					 /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
		return 0;

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
					  const_parsed_terms,
					  first_wildcard_match,
					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
			return 0;
	}

	pmu = NULL;
	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
	while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!parse_events_add_pmu(parse_state, *listp, pmu,
					  const_parsed_terms,
					  first_wildcard_match,
					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
			ok++;
			parse_state->wild_card_pmus = true;
		}
		if (first_wildcard_match == NULL) {
			first_wildcard_match =
				container_of((*listp)->prev, struct evsel, core.node);
		}
	}
	if (ok)
		return 0;

	/* Failure to add, assume event_or_pmu is an event name. */
	zfree(listp);
	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
					const_parsed_terms, listp, loc))
		return 0;

	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
		help = NULL;
	parse_events_error__handle(parse_state->error, loc->first_column,
				   strdup("Bad event or PMU"),
				   help);
	zfree(listp);
	return -EINVAL;
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	leader = list_first_entry(list, struct evsel, core.node);
	__perf_evlist__set_leader(list, &leader->core);
	zfree(&leader->group_name);
	leader->group_name = name;
}
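
/*
 * A sketch of the modifier translation below (illustrative): "cycles:u"
 * keeps exclude_user clear but sets exclude_kernel and exclude_hv,
 * "cycles:k" does the converse, and each "p" bumps precise_ip by one up to
 * the maximum of 3.
 */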

static int parse_events__modifier_list(struct parse_events_state *parse_state,
				       YYLTYPE *loc,
				       struct list_head *list,
				       struct parse_events_modifier mod,
				       bool group)
{
	struct evsel *evsel;

	if (!group && mod.weak) {
		parse_events_error__handle(parse_state->error, loc->first_column,
					   strdup("Weak modifier is for use with groups"), NULL);
		return -EINVAL;
	}

	__evlist__for_each_entry(list, evsel) {
		/* Translate modifiers into the equivalent evsel excludes. */
		int eu = group ? evsel->core.attr.exclude_user : 0;
		int ek = group ? evsel->core.attr.exclude_kernel : 0;
		int eh = group ? evsel->core.attr.exclude_hv : 0;
		int eH = group ? evsel->core.attr.exclude_host : 0;
		int eG = group ? evsel->core.attr.exclude_guest : 0;
		int exclude = eu | ek | eh;
		int exclude_GH = eG | eH;

		if (mod.user) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eu = 0;
		}
		if (mod.kernel) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		}
		if (mod.hypervisor) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		}
		if (mod.guest) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		}
		if (mod.host) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		}
		if (!exclude_GH && exclude_GH_default) {
			if (perf_host)
				eG = 1;
			else if (perf_guest)
				eH = 1;
		}

		evsel->core.attr.exclude_user = eu;
		evsel->core.attr.exclude_kernel = ek;
		evsel->core.attr.exclude_hv = eh;
		evsel->core.attr.exclude_host = eH;
		evsel->core.attr.exclude_guest = eG;
		evsel->exclude_GH = exclude_GH;

		/* Simple modifiers copied to the evsel. */
		if (mod.precise) {
			u8 precise = evsel->core.attr.precise_ip + mod.precise;
			/*
			 * precise ip:
			 *
			 *  0 - SAMPLE_IP can have arbitrary skid
			 *  1 - SAMPLE_IP must have constant skid
			 *  2 - SAMPLE_IP requested to have 0 skid
			 *  3 - SAMPLE_IP must have 0 skid
			 *
			 * See also PERF_RECORD_MISC_EXACT_IP
			 */
			if (precise > 3) {
				char *help;

				if (asprintf(&help,
					     "Maximum combined precise value is 3, adding precision to \"%s\"",
					     evsel__name(evsel)) > 0) {
					parse_events_error__handle(parse_state->error,
								   loc->first_column,
								   help, NULL);
				}
				return -EINVAL;
			}
			evsel->core.attr.precise_ip = precise;
		}
		if (mod.precise_max)
			evsel->precise_max = 1;
		if (mod.non_idle)
			evsel->core.attr.exclude_idle = 1;
		if (mod.sample_read)
			evsel->sample_read = 1;
		if (mod.pinned && evsel__is_group_leader(evsel))
			evsel->core.attr.pinned = 1;
		if (mod.exclusive && evsel__is_group_leader(evsel))
			evsel->core.attr.exclusive = 1;
		if (mod.weak)
			evsel->weak_group = true;
		if (mod.bpf)
			evsel->bpf_counter = true;
		if (mod.retire_lat)
			evsel->retire_lat = true;
	}
	return 0;
}

int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}

int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}

int parse_events__set_default_name(struct list_head *list, char *name)
{
	struct evsel *evsel;
	bool used_name = false;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name) {
			evsel->name = used_name ? strdup(name) : name;
			used_name = true;
			if (!evsel->name)
				return -ENOMEM;
		}
	}
	if (!used_name)
		free(name);
	return 0;
}

int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}

int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}

int parse_events__set_default_name(struct list_head *list, char *name)
{
	struct evsel *evsel;
	bool used_name = false;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name) {
			evsel->name = used_name ? strdup(name) : name;
			used_name = true;
			if (!evsel->name)
				return -ENOMEM;
		}
	}
	if (!used_name)
		free(name);
	return 0;
}

static int parse_events__scanner(const char *str,
				 FILE *input,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	if (str)
		buffer = parse_events__scan_string(str, scanner);
	else
		parse_events_set_in(input, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	if (str) {
		parse_events__flush_buffer(buffer, scanner);
		parse_events__delete_buffer(buffer, scanner);
	}
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * Parse an event config string and return a list of event terms.
 */
int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
{
	struct parse_events_state parse_state = {
		.terms  = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, input, &parse_state);
	if (!ret)
		list_splice(&parse_state.terms->terms, &terms->terms);

	zfree(&parse_state.terms);
	return ret;
}
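
/*
 * Minimal sketch of driving the term parser directly, e.g. from a test
 * (illustrative; error handling elided, term string is an example only):
 *
 *	struct parse_events_terms terms;
 *
 *	parse_events_terms__init(&terms);
 *	if (!parse_events_terms(&terms, "config=0x1,period=1000", NULL)) {
 *		... walk terms.terms ...
 *	}
 *	parse_events_terms__exit(&terms);
 */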

static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used as
		 * the list isn't yet sorted, so evsels in the same group aren't
		 * necessarily adjacent.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Record computed name. */
	evsel->group_pmu_name = strdup(group_pmu_name);
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}

__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}
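
/*
 * Example of the naming rule above (illustrative): if a software event such
 * as "dummy" is grouped with an uncore event, the group PMU name recorded
 * for "dummy" is the uncore PMU's name, so the later sort keeps the group
 * together instead of splitting the software event out.
 */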

static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *force_grouped_idx = _fg_idx;
	int lhs_sort_idx, rhs_sort_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;

	/*
	 * Get the indexes of the 2 events to sort. If the events are
	 * in groups then the leader's index is used, otherwise the
	 * event's index is used. An index may be forced for events that
	 * must be in the same group, namely Intel topdown events.
	 */
	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
		lhs_sort_idx = *force_grouped_idx;
	} else {
		bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;

		lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
	}
	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
		rhs_sort_idx = *force_grouped_idx;
	} else {
		bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;

		rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
	}

	/* If the indices differ then respect the insertion order. */
	if (lhs_sort_idx != rhs_sort_idx)
		return lhs_sort_idx - rhs_sort_idx;

	/*
	 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx, so lhs and rhs should
	 * be in the same group. Events in the same group need to be ordered by
	 * their grouping PMU name as the group will be broken to ensure only
	 * events on the same PMU are programmed together.
	 *
	 * With forcing, lhs_sort_idx == rhs_sort_idx shows that one or both
	 * events are being forced to be at force_grouped_idx. If only one event
	 * is being forced then the other event is the group leader of the group
	 * we're trying to force the event into. Ensure for the force grouped
	 * case that the PMU name ordering is also respected.
	 */
	lhs_pmu_name = lhs->group_pmu_name;
	rhs_pmu_name = rhs->group_pmu_name;
	ret = strcmp(lhs_pmu_name, rhs_pmu_name);
	if (ret)
		return ret;

	/*
	 * Architecture-specific sorting: by default sort events in the same
	 * group with the same PMU by their insertion index. On Intel, topdown
	 * constraints must be adhered to - slots first, etc.
	 */
	return arch_evlist__cmp(lhs, rhs);
}
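
/*
 * In effect, the comparator above orders events by the tuple
 * (sort_idx, group_pmu_name, arch_evlist__cmp), where sort_idx is the
 * leader's insertion index (or force_grouped_idx for must-group events).
 * User-specified order is therefore preserved unless grouping or PMU
 * constraints require a change.
 */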
2237 */ 2238 list_for_each_entry_continue(pos2, list, core.node) { 2239 if (pos->core.leader == pos2->core.leader && 2240 arch_evsel__must_be_in_group(pos2)) { 2241 split_even_if_last_leader_was_forced = false; 2242 break; 2243 } 2244 } 2245 } 2246 if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) { 2247 if (pos_force_grouped) { 2248 if (force_grouped_leader) { 2249 cur_leader = force_grouped_leader; 2250 cur_leaders_grp = force_grouped_leader->core.leader; 2251 } else { 2252 cur_leader = force_grouped_leader = pos; 2253 cur_leaders_grp = &pos->core; 2254 } 2255 } else { 2256 cur_leader = pos; 2257 cur_leaders_grp = pos->core.leader; 2258 } 2259 } 2260 } 2261 if (pos_leader != cur_leader) { 2262 /* The leader changed so update it. */ 2263 evsel__set_leader(pos, cur_leader); 2264 } 2265 last_event_was_forced_leader = (force_grouped_leader == pos); 2266 } 2267 list_for_each_entry(pos, list, core.node) { 2268 struct evsel *pos_leader = evsel__leader(pos); 2269 2270 if (pos == pos_leader) 2271 num_leaders++; 2272 pos_leader->core.nr_members++; 2273 } 2274 return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0; 2275 } 2276 2277 int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter, 2278 struct parse_events_error *err, bool fake_pmu, 2279 bool warn_if_reordered, bool fake_tp) 2280 { 2281 struct parse_events_state parse_state = { 2282 .list = LIST_HEAD_INIT(parse_state.list), 2283 .idx = evlist->core.nr_entries, 2284 .error = err, 2285 .stoken = PE_START_EVENTS, 2286 .fake_pmu = fake_pmu, 2287 .fake_tp = fake_tp, 2288 .pmu_filter = pmu_filter, 2289 .match_legacy_cache_terms = true, 2290 }; 2291 int ret, ret2; 2292 2293 ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state); 2294 2295 if (!ret && list_empty(&parse_state.list)) { 2296 WARN_ONCE(true, "WARNING: event parser found nothing\n"); 2297 return -1; 2298 } 2299 2300 ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list); 2301 if (ret2 < 0) 2302 return ret; 2303 2304 /* 2305 * Add list to the evlist even with errors to allow callers to clean up. 2306 */ 2307 evlist__splice_list_tail(evlist, &parse_state.list); 2308 2309 if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) { 2310 pr_warning("WARNING: events were regrouped to match PMUs\n"); 2311 2312 if (verbose > 0) { 2313 struct strbuf sb = STRBUF_INIT; 2314 2315 evlist__uniquify_evsel_names(evlist, &stat_config); 2316 evlist__format_evsels(evlist, &sb, 2048); 2317 pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf); 2318 strbuf_release(&sb); 2319 } 2320 } 2321 if (!ret) { 2322 struct evsel *last; 2323 2324 last = evlist__last(evlist); 2325 last->cmdline_group_boundary = true; 2326 2327 return 0; 2328 } 2329 2330 /* 2331 * There are 2 users - builtin-record and builtin-test objects. 2332 * Both call evlist__delete in case of error, so we dont 2333 * need to bother. 2334 */ 2335 return ret; 2336 } 2337 2338 int parse_event(struct evlist *evlist, const char *str) 2339 { 2340 struct parse_events_error err; 2341 int ret; 2342 2343 parse_events_error__init(&err); 2344 ret = parse_events(evlist, str, &err); 2345 parse_events_error__exit(&err); 2346 return ret; 2347 } 2348 2349 struct parse_events_error_entry { 2350 /** @list: The list the error is part of. 

struct parse_events_error_entry {
	/** @list: The list the error is part of. */
	struct list_head list;
	/** @idx: index in the parsed string */
	int idx;
	/** @str: string to display at the index */
	char *str;
	/** @help: optional help string */
	char *help;
};

void parse_events_error__init(struct parse_events_error *err)
{
	INIT_LIST_HEAD(&err->list);
}

void parse_events_error__exit(struct parse_events_error *err)
{
	struct parse_events_error_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &err->list, list) {
		zfree(&pos->str);
		zfree(&pos->help);
		list_del_init(&pos->list);
		free(pos);
	}
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	struct parse_events_error_entry *entry;

	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;

	entry = zalloc(sizeof(*entry));
	if (!entry) {
		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
			str, help ?: "<no help>");
		goto out_free;
	}
	entry->idx = idx;
	entry->str = str;
	entry->help = help;
	list_add(&entry->list, &err->list);
	return;
out_free:
	free(str);
	free(help);
}

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;

	if (err_str) {
		/* -2 for the extra '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent; we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}

void parse_events_error__print(const struct parse_events_error *err,
			       const char *event)
{
	struct parse_events_error_entry *pos;
	bool first = true;

	list_for_each_entry(pos, &err->list, list) {
		if (!first)
			fputs("\n", stderr);
		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
		first = false;
	}
}
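
/*
 * Illustrative rendering of the printing above for a syntax error in
 * "cycles:x" (exact alignment depends on the error index and terminal
 * width):
 *
 *	event syntax error: 'cycles:x'
 *	                           \___ parser error
 */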
2482 */ 2483 bool parse_events_error__contains(const struct parse_events_error *err, 2484 const char *needle) 2485 { 2486 struct parse_events_error_entry *pos; 2487 2488 list_for_each_entry(pos, &err->list, list) { 2489 if (strstr(pos->str, needle) != NULL) 2490 return true; 2491 } 2492 return false; 2493 } 2494 2495 #undef MAX_WIDTH 2496 2497 int parse_events_option(const struct option *opt, const char *str, 2498 int unset __maybe_unused) 2499 { 2500 struct parse_events_option_args *args = opt->value; 2501 struct parse_events_error err; 2502 int ret; 2503 2504 parse_events_error__init(&err); 2505 ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err, 2506 /*fake_pmu=*/false, /*warn_if_reordered=*/true, 2507 /*fake_tp=*/false); 2508 2509 if (ret) { 2510 parse_events_error__print(&err, str); 2511 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 2512 } 2513 parse_events_error__exit(&err); 2514 2515 return ret; 2516 } 2517 2518 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset) 2519 { 2520 struct parse_events_option_args *args = opt->value; 2521 int ret; 2522 2523 if (*args->evlistp == NULL) { 2524 *args->evlistp = evlist__new(); 2525 2526 if (*args->evlistp == NULL) { 2527 fprintf(stderr, "Not enough memory to create evlist\n"); 2528 return -1; 2529 } 2530 } 2531 ret = parse_events_option(opt, str, unset); 2532 if (ret) { 2533 evlist__delete(*args->evlistp); 2534 *args->evlistp = NULL; 2535 } 2536 2537 return ret; 2538 } 2539 2540 static int 2541 foreach_evsel_in_last_glob(struct evlist *evlist, 2542 int (*func)(struct evsel *evsel, 2543 const void *arg), 2544 const void *arg) 2545 { 2546 struct evsel *last = NULL; 2547 int err; 2548 2549 /* 2550 * Don't return when list_empty, give func a chance to report 2551 * error when it found last == NULL. 2552 * 2553 * So no need to WARN here, let *func do this. 2554 */ 2555 if (evlist->core.nr_entries > 0) 2556 last = evlist__last(evlist); 2557 2558 do { 2559 err = (*func)(last, arg); 2560 if (err) 2561 return -1; 2562 if (!last) 2563 return 0; 2564 2565 if (last->core.node.prev == &evlist->core.entries) 2566 return 0; 2567 last = list_entry(last->core.node.prev, struct evsel, core.node); 2568 } while (!last->cmdline_group_boundary); 2569 2570 return 0; 2571 } 2572 2573 /* Will a tracepoint filter work for str or should a BPF filter be used? 

/* Will a tracepoint filter work for str or should a BPF filter be used? */
static bool is_possible_tp_filter(const char *str)
{
	return strstr(str, "uid") == NULL;
}

static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	pmu = evsel__find_pmu(evsel);
	if (pmu) {
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);
	}
	if (!nr_addr_filters)
		return perf_bpf_filter__parse(&evsel->bpf_filters, str);

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}
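
/*
 * Filter routing summary (illustrative): a --filter string becomes a
 * tracepoint filter when the event is a tracepoint and the string can be
 * handled there; strings mentioning "uid" go to the BPF filter instead, as
 * do filters for PMUs without address filters; a PMU advertising
 * nr_addr_filters (e.g. an Intel PT style tracer) gets an address filter.
 */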
2642 */ 2643 ui__warning("Failed to add UID filtering that uses BPF filtering.\n"); 2644 } else { 2645 fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n"); 2646 } 2647 } 2648 return ret; 2649 } 2650 2651 static int add_exclude_perf_filter(struct evsel *evsel, 2652 const void *arg __maybe_unused) 2653 { 2654 char new_filter[64]; 2655 2656 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2657 fprintf(stderr, 2658 "--exclude-perf option should follow a -e tracepoint option\n"); 2659 return -1; 2660 } 2661 2662 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); 2663 2664 if (evsel__append_tp_filter(evsel, new_filter) < 0) { 2665 fprintf(stderr, 2666 "not enough memory to hold filter string\n"); 2667 return -1; 2668 } 2669 2670 return 0; 2671 } 2672 2673 int exclude_perf(const struct option *opt, 2674 const char *arg __maybe_unused, 2675 int unset __maybe_unused) 2676 { 2677 struct evlist *evlist = *(struct evlist **)opt->value; 2678 2679 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, 2680 NULL); 2681 } 2682 2683 int parse_events__is_hardcoded_term(struct parse_events_term *term) 2684 { 2685 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 2686 } 2687 2688 static int new_term(struct parse_events_term **_term, 2689 struct parse_events_term *temp, 2690 char *str, u64 num) 2691 { 2692 struct parse_events_term *term; 2693 2694 term = malloc(sizeof(*term)); 2695 if (!term) 2696 return -ENOMEM; 2697 2698 *term = *temp; 2699 INIT_LIST_HEAD(&term->list); 2700 term->weak = false; 2701 2702 switch (term->type_val) { 2703 case PARSE_EVENTS__TERM_TYPE_NUM: 2704 term->val.num = num; 2705 break; 2706 case PARSE_EVENTS__TERM_TYPE_STR: 2707 term->val.str = str; 2708 break; 2709 default: 2710 free(term); 2711 return -EINVAL; 2712 } 2713 2714 *_term = term; 2715 return 0; 2716 } 2717 2718 int parse_events_term__num(struct parse_events_term **term, 2719 enum parse_events__term_type type_term, 2720 const char *config, u64 num, 2721 bool no_value, 2722 void *loc_term_, void *loc_val_) 2723 { 2724 YYLTYPE *loc_term = loc_term_; 2725 YYLTYPE *loc_val = loc_val_; 2726 2727 struct parse_events_term temp = { 2728 .type_val = PARSE_EVENTS__TERM_TYPE_NUM, 2729 .type_term = type_term, 2730 .config = config ? : strdup(parse_events__term_type_str(type_term)), 2731 .no_value = no_value, 2732 .err_term = loc_term ? loc_term->first_column : 0, 2733 .err_val = loc_val ? loc_val->first_column : 0, 2734 }; 2735 2736 return new_term(term, &temp, /*str=*/NULL, num); 2737 } 2738 2739 int parse_events_term__str(struct parse_events_term **term, 2740 enum parse_events__term_type type_term, 2741 char *config, char *str, 2742 void *loc_term_, void *loc_val_) 2743 { 2744 YYLTYPE *loc_term = loc_term_; 2745 YYLTYPE *loc_val = loc_val_; 2746 2747 struct parse_events_term temp = { 2748 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2749 .type_term = type_term, 2750 .config = config, 2751 .err_term = loc_term ? loc_term->first_column : 0, 2752 .err_val = loc_val ? 

int parse_events_term__str(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config    = config,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, /*num=*/0);
}

int parse_events_term__term(struct parse_events_term **term,
			    enum parse_events__term_type term_lhs,
			    enum parse_events__term_type term_rhs,
			    void *loc_term, void *loc_val)
{
	return parse_events_term__str(term, term_lhs, NULL,
				      strdup(parse_events__term_type_str(term_rhs)),
				      loc_term, loc_val);
}

int parse_events_term__clone(struct parse_events_term **new,
			     const struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = *term;

	temp.used = false;
	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, /*str=*/NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str) {
		zfree(&temp.config);
		return -ENOMEM;
	}
	return new_term(new, &temp, str, /*num=*/0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &src->terms, list) {
		struct parse_events_term *n;
		int ret;

		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;

		list_add_tail(&n->list, &dest->terms);
	}
	return 0;
}

void parse_events_terms__init(struct parse_events_terms *terms)
{
	INIT_LIST_HEAD(&terms->terms);
}

void parse_events_terms__exit(struct parse_events_terms *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, &terms->terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct parse_events_terms *terms)
{
	if (!terms)
		return;
	parse_events_terms__exit(terms);
	free(terms);
}

int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
	struct parse_events_term *term;
	bool first = true;

	if (!terms)
		return 0;

	list_for_each_entry(term, &terms->terms, list) {
		int ret;

		if (!first) {
			ret = strbuf_addch(sb, ',');
			if (ret < 0)
				return ret;
		}
		first = false;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->no_value) {
				assert(term->val.num == 1);
				ret = strbuf_addf(sb, "%s", term->config);
			} else {
				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
			}
		} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
			if (term->config) {
				ret = strbuf_addf(sb, "%s=", term->config);
				if (ret < 0)
					return ret;
			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
				ret = strbuf_addf(sb, "%s=",
						  parse_events__term_type_str(term->type_term));
				if (ret < 0)
					return ret;
			}
			assert(!term->no_value);
			ret = strbuf_addf(sb, "%s", term->val.str);
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}
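
/*
 * Round-trip sketch (illustrative): a terms list built from
 * "config=0x1,name=foo" is rendered by parse_events_terms__to_strbuf()
 * as "config=0x1,name=foo" again, while a no_value NUM term is printed
 * as its bare config name rather than "name=1".
 */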

static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = parse_events__term_type_str(i);

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string containing the valid config terms for an event.
 * @additional_terms: For terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}
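
/*
 * Illustrative result (the exact term set varies by build): the helper
 * above returns a string such as
 * "valid terms: config,config1,config2,name,period,..." for embedding in
 * parse-error help text.
 */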