// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <api/io_dir.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "ui/ui.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
#include "tracepoint.h"

#define MAX_NAME_LEN 100

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest);

const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias = "",
	},
};

const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias = "",
	},
	[PERF_COUNT_SW_BPF_OUTPUT] = {
		.symbol = "bpf-output",
		.alias = "",
	},
	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
		.symbol = "cgroup-switches",
		.alias = "",
	},
};

static const char *const event_types[] = {
	[PERF_TYPE_HARDWARE] = "hardware",
"hardware", 140 [PERF_TYPE_SOFTWARE] = "software", 141 [PERF_TYPE_TRACEPOINT] = "tracepoint", 142 [PERF_TYPE_HW_CACHE] = "hardware-cache", 143 [PERF_TYPE_RAW] = "raw", 144 [PERF_TYPE_BREAKPOINT] = "breakpoint", 145 }; 146 147 const char *event_type(size_t type) 148 { 149 if (type >= PERF_TYPE_MAX) 150 return "unknown"; 151 152 return event_types[type]; 153 } 154 155 static char *get_config_str(const struct parse_events_terms *head_terms, 156 enum parse_events__term_type type_term) 157 { 158 struct parse_events_term *term; 159 160 if (!head_terms) 161 return NULL; 162 163 list_for_each_entry(term, &head_terms->terms, list) 164 if (term->type_term == type_term) 165 return term->val.str; 166 167 return NULL; 168 } 169 170 static char *get_config_metric_id(const struct parse_events_terms *head_terms) 171 { 172 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID); 173 } 174 175 static char *get_config_name(const struct parse_events_terms *head_terms) 176 { 177 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME); 178 } 179 180 static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms) 181 { 182 struct parse_events_term *term; 183 struct perf_cpu_map *cpus = NULL; 184 185 if (!head_terms) 186 return NULL; 187 188 list_for_each_entry(term, &head_terms->terms, list) { 189 if (term->type_term == PARSE_EVENTS__TERM_TYPE_CPU) { 190 struct perf_cpu_map *term_cpus; 191 192 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { 193 term_cpus = perf_cpu_map__new_int(term->val.num); 194 } else { 195 struct perf_pmu *pmu = perf_pmus__find(term->val.str); 196 197 if (pmu && perf_cpu_map__is_empty(pmu->cpus)) 198 term_cpus = pmu->is_core ? cpu_map__online() : NULL; 199 else if (pmu) 200 term_cpus = perf_cpu_map__get(pmu->cpus); 201 else 202 term_cpus = perf_cpu_map__new(term->val.str); 203 } 204 perf_cpu_map__merge(&cpus, term_cpus); 205 perf_cpu_map__put(term_cpus); 206 } 207 } 208 209 return cpus; 210 } 211 212 /** 213 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that 214 * matches the raw's string value. If the string value matches an 215 * event then change the term to be an event, if not then change it to 216 * be a config term. For example, "read" may be an event of the PMU or 217 * a raw hex encoding of 0xead. The fix-up is done late so the PMU of 218 * the event can be determined and we don't need to scan all PMUs 219 * ahead-of-time. 220 * @config_terms: the list of terms that may contain a raw term. 221 * @pmu: the PMU to scan for events from. 
 */
static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &config_terms->terms, list) {
		u64 num;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
			continue;

		if (perf_pmu__have_event(pmu, term->val.str)) {
			zfree(&term->config);
			term->config = term->val.str;
			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->val.num = 1;
			term->no_value = true;
			continue;
		}

		zfree(&term->config);
		term->config = strdup("config");
		errno = 0;
		num = strtoull(term->val.str + 1, NULL, 16);
		assert(errno == 0);
		free(term->val.str);
		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
		term->val.num = num;
		term->no_value = false;
	}
}

static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, struct evsel *first_wildcard_match,
	    struct perf_cpu_map *cpu_list, u64 alternate_hw_config)
{
	struct evsel *evsel;
	bool is_pmu_core;
	struct perf_cpu_map *cpus;
	bool has_cpu_list = !perf_cpu_map__is_empty(cpu_list);

	/*
	 * Ensure the first_wildcard_match's PMU matches that of the new event
	 * being added. Otherwise try to match with another event further down
	 * the evlist.
	 */
	if (first_wildcard_match) {
		struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);

		first_wildcard_match = NULL;
		list_for_each_entry_continue(pos, list, core.node) {
			if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
				first_wildcard_match = pos;
				break;
			}
			if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
				first_wildcard_match = pos;
				break;
			}
		}
	}

	if (pmu) {
		is_pmu_core = pmu->is_core;
		cpus = perf_cpu_map__get(has_cpu_list ? cpu_list : pmu->cpus);
		perf_pmu__warn_invalid_formats(pmu);
		if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
			perf_pmu__warn_invalid_config(pmu, attr->config, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
			perf_pmu__warn_invalid_config(pmu, attr->config1, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
			perf_pmu__warn_invalid_config(pmu, attr->config2, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
			perf_pmu__warn_invalid_config(pmu, attr->config3, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
		}
	} else {
		is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
			       attr->type == PERF_TYPE_HW_CACHE);
		if (has_cpu_list)
			cpus = perf_cpu_map__get(cpu_list);
		else
			cpus = is_pmu_core ? cpu_map__online() : NULL;
	}
	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel) {
		perf_cpu_map__put(cpus);
		return NULL;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.pmu_cpus = perf_cpu_map__get(cpus);
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->core.is_pmu_core = is_pmu_core;
	evsel->pmu = pmu;
	evsel->alternate_hw_config = alternate_hw_config;
	evsel->first_wildcard_match = first_wildcard_match;

	if (name)
		evsel->name = strdup(name);

	if (metric_id)
		evsel->metric_id = strdup(metric_id);

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	if (has_cpu_list)
		evsel__warn_user_requested_cpus(evsel, cpu_list);

	return evsel;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms,
		     u64 alternate_hw_config)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   alternate_hw_config) ? 0 : -ENOMEM;
}

/**
 * parse_aliases - search names for entries beginning or equalling str ignoring
 *                 case. If multiple entries in names match str then the longest
 *                 is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
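 *                                     For example, "L1-dcache-load-misses"
 *                                     decodes to type L1D (0), op READ (0 << 8)
 *                                     and result MISS (1 << 16), i.e. a config
 *                                     value of 0x10000.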
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}

/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match, u64 alternate_hw_config);

int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
			   struct parse_events_state *parse_state,
			   struct parse_events_terms *parsed_terms)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;
	const char *config_name = get_config_name(parsed_terms);
	const char *metric_id = get_config_metric_id(parsed_terms);
	struct perf_cpu_map *cpus = get_config_cpu(parsed_terms);
	int ret = 0;
	struct evsel *first_wildcard_match = NULL;

	while ((pmu = perf_pmus__scan_for_event(pmu, name)) != NULL) {
		LIST_HEAD(config_terms);
		struct perf_event_attr attr;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (perf_pmu__have_event(pmu, name)) {
			/*
			 * The PMU has the event so add as not a legacy cache
			 * event.
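			 * (For example, a sysfs or JSON described event that
			 * happens to share its name with a legacy cache event.)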
			 */
			ret = parse_events_add_pmu(parse_state, list, pmu,
						   parsed_terms,
						   first_wildcard_match,
						   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
			if (ret)
				goto out_err;
			if (first_wildcard_match == NULL)
				first_wildcard_match =
					container_of(list->prev, struct evsel, core.node);
			continue;
		}

		if (!pmu->is_core) {
			/* Legacy cache events are only supported by core PMUs. */
			continue;
		}

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HW_CACHE;

		ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
		if (ret)
			return ret;

		found_supported = true;

		if (parsed_terms) {
			if (config_attr(&attr, parsed_terms, parse_state->error,
					config_term_common)) {
				ret = -EINVAL;
				goto out_err;
			}
			if (get_config_terms(parsed_terms, &config_terms)) {
				ret = -ENOMEM;
				goto out_err;
			}
		}

		if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
				metric_id, pmu, &config_terms, first_wildcard_match,
				cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
			ret = -ENOMEM;

		if (first_wildcard_match == NULL)
			first_wildcard_match = container_of(list->prev, struct evsel, core.node);
		free_config_terms(&config_terms);
		if (ret)
			goto out_err;
	}
out_err:
	perf_cpu_map__put(cpus);
	return found_supported ? 0 : -EINVAL;
}

static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name, int column)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get error directly from syscall errno ( > 0),
	 * or from encoded pointer's error ( < 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, column, strdup(str), strdup(help));
}

static int add_tracepoint(struct parse_events_state *parse_state,
			  struct list_head *list,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
					       !parse_state->fake_tp);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
				      struct list_head *list,
				      const char *sys_name, const char *evt_name,
				      struct parse_events_error *err,
				      struct parse_events_terms *head_config, YYLTYPE *loc)
{
	char *evt_path;
	struct io_dirent64 *evt_ent;
	struct io_dir evt_dir;
	int ret = 0, found = 0;

	evt_path = get_events_file(sys_name);
	if (!evt_path) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}
	io_dir__init(&evt_dir, open(evt_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	if (evt_dir.dirfd < 0) {
		put_events_file(evt_path);
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (evt_ent = io_dir__readdir(&evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		found++;

		ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
				     err, head_config, loc);
	}

	if (!found) {
		tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
		ret = -1;
	}

	put_events_file(evt_path);
	close(evt_dir.dirfd);
	return ret;
}

static int add_tracepoint_event(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys_name, const char *evt_name,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, YYLTYPE *loc)
{
	return strpbrk(evt_name, "*?") ?
		add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
					   err, head_config, loc) :
		add_tracepoint(parse_state, list, sys_name, evt_name,
			       err, head_config, loc);
}

static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
				    struct list_head *list,
				    const char *sys_name, const char *evt_name,
				    struct parse_events_error *err,
				    struct parse_events_terms *head_config, YYLTYPE *loc)
{
	struct io_dirent64 *events_ent;
	struct io_dir events_dir;
	int ret = 0;
	char *events_dir_path = get_tracing_file("events");

	if (!events_dir_path) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}
	io_dir__init(&events_dir, open(events_dir_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	put_events_file(events_dir_path);
	if (events_dir.dirfd < 0) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (events_ent = io_dir__readdir(&events_dir))) {
		if (!strcmp(events_ent->d_name, ".")
		    || !strcmp(events_ent->d_name, "..")
		    || !strcmp(events_ent->d_name, "enable")
		    || !strcmp(events_ent->d_name, "header_event")
		    || !strcmp(events_ent->d_name, "header_page"))
			continue;

		if (!strglobmatch(events_ent->d_name, sys_name))
			continue;

		ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
					   evt_name, err, head_config, loc);
	}
	close(events_dir.dirfd);
	return ret;
}

size_t default_breakpoint_len(void)
{
#if defined(__i386__)
	static int len;

	if (len == 0) {
		struct perf_env env = {};

		perf_env__init(&env);
		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
		perf_env__exit(&env);
	}
	return len;
#elif defined(__aarch64__)
	return 4;
#else
	return sizeof(long);
#endif
}

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
				struct list_head *list,
				u64 addr, char *type, u64 len,
				struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = default_breakpoint_len();
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);

	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
			 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  enum parse_events__term_val_type type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					   type == PARSE_EVENTS__TERM_TYPE_NUM
					   ? strdup("expected numeric value")
					   : strdup("expected string value"),
					   NULL);
	}
	return -EINVAL;
}

static bool config_term_shrinked;

const char *parse_events__term_type_str(enum parse_events__term_type term_type)
{
	/*
	 * Update according to parse-events.l
	 */
	static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
		[PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
		[PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
		[PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
		[PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
		[PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
		[PARSE_EVENTS__TERM_TYPE_NAME] = "name",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
		[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
		[PARSE_EVENTS__TERM_TYPE_TIME] = "time",
		[PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
		[PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
		[PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
		[PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
		[PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
		[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
		[PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
		[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
		[PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
		[PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
		[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
		[PARSE_EVENTS__TERM_TYPE_AUX_ACTION] = "aux-action",
		[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
		[PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
		[PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
		[PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
		[PARSE_EVENTS__TERM_TYPE_CPU] = "cpu",
	};
	if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
		return "unknown term";

	return config_term_names[term_type];
}

static bool
config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					   strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
		return true;
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     parse_events__term_type_str(term_type)) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)						\
do {									\
	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type))\
		return -EINVAL;						\
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(err, term->err_val,
						   strdup("invalid branch sample type"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_RAW:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(err, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CPU: {
		struct perf_cpu_map *map;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
				parse_events_error__handle(err, term->err_val,
							   strdup("too big"),
							   /*help=*/NULL);
				return -EINVAL;
			}
			break;
		}
		assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
		if (perf_pmus__find(term->val.str) != NULL)
			break;

		map = perf_cpu_map__new(term->val.str);
		if (!map) {
			parse_events_error__handle(err, term->err_val,
						   strdup("not a valid PMU or CPU number"),
						   /*help=*/NULL);
			return -EINVAL;
		}
		perf_cpu_map__put(map);
		break;
	}
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		parse_events_error__handle(err, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after basic checking so
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If we checked availability at the entry of this function, the user
	 * would see "'<sysfs term>' is not usable in 'perf stat'" whenever an
	 * invalid config term is provided for legacy events (for example,
	 * instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, err))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_error *err)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * Rewrite the PMU event to a legacy cache one unless the PMU
		 * doesn't support legacy cache events or the event is present
		 * within the PMU.
		 */
		if (perf_pmu__supports_legacy_cache(pmu) &&
		    !perf_pmu__have_event(pmu, term->config)) {
			attr->type = PERF_TYPE_HW_CACHE;
			return parse_events__decode_legacy_cache(term->config, pmu->type,
								 &attr->config);
		} else {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
		}
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * If the PMU has a sysfs or json event prefer it over
		 * legacy. ARM requires this.
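		 * For example, an Arm PMU that exposes its own "cycles" event
		 * takes precedence over the legacy PERF_COUNT_HW_CPU_CYCLES
		 * encoding here.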
		 */
		if (perf_pmu__have_event(pmu, term->config)) {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
			term->alternate_hw_config = true;
		} else {
			attr->type = PERF_TYPE_HARDWARE;
			attr->config = term->val.num;
			if (perf_pmus__supports_extended_type())
				attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
		}
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, err);
}

static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
						   strdup(parse_events__term_type_str(term->type_term)),
						   strdup("valid terms: call-graph,stack-size\n"));
		}
		return -EINVAL;
	}

	return 0;
}

static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &head->terms, list)
		if (config_term(attr, term, err))
			return -EINVAL;

	return 0;
}

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
			ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_USER:
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}
	return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
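 * For example, a user term such as "event=0x3c" marks only the bits of the
 * PMU's "event" format field, while an explicit "config=N" term marks all
 * bits.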
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;

	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(parse_state, list, sys, event,
						err, head_config, loc);
	else
		return add_tracepoint_event(parse_state, list, sys, event,
					    err, head_config, loc);
}

static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				      struct list_head *list,
				      struct perf_pmu *pmu, u32 type, u32 extended_type,
				      u64 config, const struct parse_events_terms *head_config,
				      struct evsel *first_wildcard_match)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	struct perf_cpu_map *cpus;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	cpus = get_config_cpu(head_config);
	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
			  metric_id, pmu, &config_terms, first_wildcard_match,
			  cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
	perf_cpu_map__put(cpus);
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     const struct parse_events_terms *head_config,
			     bool wildcard)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;

	/* Wildcards on numeric values are only supported by core PMUs. */
	if (wildcard && perf_pmus__supports_extended_type()) {
		struct evsel *first_wildcard_match = NULL;
		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			int ret;

			found_supported = true;
			if (parse_events__filter_pmu(parse_state, pmu))
				continue;

			ret = __parse_events_add_numeric(parse_state, list, pmu,
							 type, pmu->type,
							 config, head_config,
							 first_wildcard_match);
			if (ret)
				return ret;
			if (first_wildcard_match == NULL)
				first_wildcard_match =
					container_of(list->prev, struct evsel, core.node);
		}
		if (found_supported)
			return 0;
	}
	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
					  type, /*extended_type=*/0, config, head_config,
					  /*first_wildcard_match=*/NULL);
}

static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match, u64 alternate_hw_config)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);
	struct parse_events_terms parsed_terms;
	bool alias_rewrote_terms = false;
	struct perf_cpu_map *term_cpu = NULL;

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		if (pmu->selectable && const_parsed_terms &&
		    list_empty(&const_parsed_terms->terms)) {
			strbuf_addf(&sb, "%s//", pmu->name);
		} else {
			strbuf_addf(&sb, "%s/", pmu->name);
			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
			strbuf_addch(&sb, '/');
		}
		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
		strbuf_release(&sb);
	}

	memset(&attr, 0, sizeof(attr));
	if (pmu->perf_event_attr_init_default)
		pmu->perf_event_attr_init_default(pmu, &attr);

	attr.type = pmu->type;

	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, first_wildcard_match,
				    /*cpu_list=*/NULL, alternate_hw_config);
		return evsel ? 0 : -ENOMEM;
	}

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}
	fix_raw(&parsed_terms, pmu);

	/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
	if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	/* Look for event names in the terms and rewrite into format based terms. */
	if (perf_pmu__check_alias(pmu, &parsed_terms,
				  &info, &alias_rewrote_terms,
				  &alternate_hw_config, err)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		parse_events_terms__to_strbuf(&parsed_terms, &sb);
		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
		strbuf_release(&sb);
	}

	/* Configure attr/terms again if an alias was expanded. */
	if (alias_rewrote_terms &&
	    config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (get_config_terms(&parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->perf_event_attr_init_default &&
	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/* Skip configuring hard coded terms that were applied by config_attr. */
	if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
			     parse_state->error)) {
		free_config_terms(&config_terms);
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	term_cpu = get_config_cpu(&parsed_terms);
	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(&parsed_terms),
			    get_config_metric_id(&parsed_terms), pmu,
			    &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
	perf_cpu_map__put(term_cpu);
	if (!evsel) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->percore = config_term_percore(&evsel->config_terms);

	parse_events_terms__exit(&parsed_terms);
	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	evsel->retirement_latency.mean = info.retirement_latency_mean;
	evsel->retirement_latency.min = info.retirement_latency_min;
	evsel->retirement_latency.max = info.retirement_latency_max;

	return 0;
}

int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       const char *event_name, u64 hw_config,
			       const struct parse_events_terms *const_parsed_terms,
			       struct list_head **listp, void *loc_)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct perf_pmu *pmu = NULL;
	YYLTYPE *loc = loc_;
	int ok = 0;
	const char *config;
	struct parse_events_terms parsed_terms;
	struct evsel *first_wildcard_match = NULL;

	*listp = NULL;

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}

	config = strdup(event_name);
	if (!config)
		goto out_err;

	if (parse_events_term__num(&term,
				   PARSE_EVENTS__TERM_TYPE_USER,
				   config, /*num=*/1, /*novalue=*/true,
				   loc, /*loc_val=*/NULL) < 0) {
		zfree(&config);
		goto out_err;
	}
	list_add_tail(&term->list, &parsed_terms.terms);

	/* Add it for all PMUs that support the alias */
	list = malloc(sizeof(struct list_head));
	if (!list)
		goto out_err;

	INIT_LIST_HEAD(list);

	while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) {

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!perf_pmu__have_event(pmu, event_name))
			continue;

		if (!parse_events_add_pmu(parse_state, list, pmu,
					  &parsed_terms, first_wildcard_match, hw_config)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
		if (first_wildcard_match == NULL)
			first_wildcard_match = container_of(list->prev, struct evsel, core.node);
	}

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
					  first_wildcard_match, hw_config)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
	}

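	/*
	 * Consider the add successful only if at least one PMU accepted the
	 * event; otherwise release the partially built list.
	 */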
out_err:
	parse_events_terms__exit(&parsed_terms);
	if (ok)
		*listp = list;
	else
		free(list);

	return ok ? 0 : -1;
}

int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
					  const char *event_or_pmu,
					  const struct parse_events_terms *const_parsed_terms,
					  struct list_head **listp,
					  void *loc_)
{
	YYLTYPE *loc = loc_;
	struct perf_pmu *pmu;
	int ok = 0;
	char *help;
	struct evsel *first_wildcard_match = NULL;

	*listp = malloc(sizeof(**listp));
	if (!*listp)
		return -ENOMEM;

	INIT_LIST_HEAD(*listp);

	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
	pmu = perf_pmus__find(event_or_pmu);
	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
					 first_wildcard_match,
					 /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
		return 0;

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
					  const_parsed_terms,
					  first_wildcard_match,
					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
			return 0;
	}

	pmu = NULL;
	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
	while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!parse_events_add_pmu(parse_state, *listp, pmu,
					  const_parsed_terms,
					  first_wildcard_match,
					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
			ok++;
			parse_state->wild_card_pmus = true;
		}
		if (first_wildcard_match == NULL) {
			first_wildcard_match =
				container_of((*listp)->prev, struct evsel, core.node);
		}
	}
	if (ok)
		return 0;

	/* Failure to add, assume event_or_pmu is an event name. */
	zfree(listp);
	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
					const_parsed_terms, listp, loc))
		return 0;

	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
		help = NULL;
	parse_events_error__handle(parse_state->error, loc->first_column,
				   strdup("Bad event or PMU"),
				   help);
	zfree(listp);
	return -EINVAL;
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	leader = list_first_entry(list, struct evsel, core.node);
	__perf_evlist__set_leader(list, &leader->core);
	zfree(&leader->group_name);
	leader->group_name = name;
}

static int parse_events__modifier_list(struct parse_events_state *parse_state,
				       YYLTYPE *loc,
				       struct list_head *list,
				       struct parse_events_modifier mod,
				       bool group)
{
	struct evsel *evsel;

	if (!group && mod.weak) {
		parse_events_error__handle(parse_state->error, loc->first_column,
					   strdup("Weak modifier is for use with groups"), NULL);
		return -EINVAL;
	}

	__evlist__for_each_entry(list, evsel) {
		/* Translate modifiers into the equivalent evsel excludes. */
		int eu = group ? evsel->core.attr.exclude_user : 0;
		int ek = group ? evsel->core.attr.exclude_kernel : 0;
		int eh = group ? evsel->core.attr.exclude_hv : 0;
		int eH = group ? evsel->core.attr.exclude_host : 0;
		int eG = group ? evsel->core.attr.exclude_guest : 0;
static int parse_events__modifier_list(struct parse_events_state *parse_state,
				       YYLTYPE *loc,
				       struct list_head *list,
				       struct parse_events_modifier mod,
				       bool group)
{
	struct evsel *evsel;

	if (!group && mod.weak) {
		parse_events_error__handle(parse_state->error, loc->first_column,
					   strdup("Weak modifier is for use with groups"), NULL);
		return -EINVAL;
	}

	__evlist__for_each_entry(list, evsel) {
		/* Translate modifiers into the equivalent evsel excludes. */
		int eu = group ? evsel->core.attr.exclude_user : 0;
		int ek = group ? evsel->core.attr.exclude_kernel : 0;
		int eh = group ? evsel->core.attr.exclude_hv : 0;
		int eH = group ? evsel->core.attr.exclude_host : 0;
		int eG = group ? evsel->core.attr.exclude_guest : 0;
		int exclude = eu | ek | eh;
		int exclude_GH = eG | eH;

		if (mod.user) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eu = 0;
		}
		if (mod.kernel) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		}
		if (mod.hypervisor) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		}
		if (mod.guest) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		}
		if (mod.host) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		}
		if (!exclude_GH && exclude_GH_default) {
			if (perf_host)
				eG = 1;
			else if (perf_guest)
				eH = 1;
		}

		evsel->core.attr.exclude_user = eu;
		evsel->core.attr.exclude_kernel = ek;
		evsel->core.attr.exclude_hv = eh;
		evsel->core.attr.exclude_host = eH;
		evsel->core.attr.exclude_guest = eG;
		evsel->exclude_GH = exclude_GH;

		/* Simple modifiers copied to the evsel. */
		if (mod.precise) {
			u8 precise = evsel->core.attr.precise_ip + mod.precise;
			/*
			 * precise ip:
			 *
			 *  0 - SAMPLE_IP can have arbitrary skid
			 *  1 - SAMPLE_IP must have constant skid
			 *  2 - SAMPLE_IP requested to have 0 skid
			 *  3 - SAMPLE_IP must have 0 skid
			 *
			 * See also PERF_RECORD_MISC_EXACT_IP
			 */
			if (precise > 3) {
				char *help;

				if (asprintf(&help,
					     "Maximum combined precise value is 3, adding precision to \"%s\"",
					     evsel__name(evsel)) > 0) {
					parse_events_error__handle(parse_state->error,
								   loc->first_column,
								   help, NULL);
				}
				return -EINVAL;
			}
			evsel->core.attr.precise_ip = precise;
		}
		if (mod.precise_max)
			evsel->precise_max = 1;
		if (mod.non_idle)
			evsel->core.attr.exclude_idle = 1;
		if (mod.sample_read)
			evsel->sample_read = 1;
		if (mod.pinned && evsel__is_group_leader(evsel))
			evsel->core.attr.pinned = 1;
		if (mod.exclusive && evsel__is_group_leader(evsel))
			evsel->core.attr.exclusive = 1;
		if (mod.weak)
			evsel->weak_group = true;
		if (mod.bpf)
			evsel->bpf_counter = true;
		if (mod.retire_lat)
			evsel->retire_lat = true;
	}
	return 0;
}

int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}

int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}

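/*
 * Give each evsel in @list that doesn't already have a name a copy of @name.
 * The first such evsel takes ownership of @name itself; later ones receive a
 * strdup()ed copy. If no evsel needed the name it is freed here.
 */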
int parse_events__set_default_name(struct list_head *list, char *name)
{
	struct evsel *evsel;
	bool used_name = false;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name) {
			evsel->name = used_name ? strdup(name) : name;
			used_name = true;
			if (!evsel->name)
				return -ENOMEM;
		}
	}
	if (!used_name)
		free(name);
	return 0;
}

static int parse_events__scanner(const char *str,
				 FILE *input,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	if (str)
		buffer = parse_events__scan_string(str, scanner);
	else
		parse_events_set_in(input, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	if (str) {
		parse_events__flush_buffer(buffer, scanner);
		parse_events__delete_buffer(buffer, scanner);
	}
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * Parse an event config string and return a list of event terms.
 */
int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
{
	struct parse_events_state parse_state = {
		.terms  = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, input, &parse_state);
	if (!ret)
		list_splice(&parse_state.terms->terms, &terms->terms);

	zfree(&parse_state.terms);
	return ret;
}

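/*
 * Sketch of a parse_events_terms() caller (illustrative; the term string is
 * just an example). The caller owns the terms and releases them afterwards:
 *
 *	struct parse_events_terms terms;
 *
 *	parse_events_terms__init(&terms);
 *	if (!parse_events_terms(&terms, "period=1000,freq=0", NULL))
 *		... use terms ...
 *	parse_events_terms__exit(&terms);
 */
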
static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used as
		 * the list isn't yet sorted putting evsels in the same group
		 * together.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Record computed name. */
	evsel->group_pmu_name = strdup(group_pmu_name);
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}

__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}

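/*
 * The __weak default above can be overridden per architecture; x86, for
 * example, overrides it so that Intel topdown events keep their required
 * ordering within a group (the slots event first).
 */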
static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *force_grouped_idx = _fg_idx;
	int lhs_sort_idx, rhs_sort_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;

	/*
	 * Get the indexes of the 2 events to sort. If the events are
	 * in groups then the leader's index is used otherwise the
	 * event's index is used. An index may be forced for events that
	 * must be in the same group, namely Intel topdown events.
	 */
	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
		lhs_sort_idx = *force_grouped_idx;
	} else {
		bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;

		lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
	}
	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
		rhs_sort_idx = *force_grouped_idx;
	} else {
		bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;

		rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
	}

	/* If the indices differ then respect the insertion order. */
	if (lhs_sort_idx != rhs_sort_idx)
		return lhs_sort_idx - rhs_sort_idx;

	/*
	 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should
	 * be in the same group. Events in the same group need to be ordered by
	 * their grouping PMU name as the group will be broken to ensure only
	 * events on the same PMU are programmed together.
	 *
	 * With forcing the lhs_sort_idx == rhs_sort_idx shows that one or both
	 * events are being forced to be at force_group_index. If only one event
	 * is being forced then the other event is the group leader of the group
	 * we're trying to force the event into. Ensure for the force grouped
	 * case that the PMU name ordering is also respected.
	 */
	lhs_pmu_name = lhs->group_pmu_name;
	rhs_pmu_name = rhs->group_pmu_name;
	ret = strcmp(lhs_pmu_name, rhs_pmu_name);
	if (ret)
		return ret;

	/*
	 * Architecture specific sorting, by default sort events in the same
	 * group with the same PMU by their insertion index. On Intel topdown
	 * constraints must be adhered to - slots first, etc.
	 */
	return arch_evlist__cmp(lhs, rhs);
}

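/*
 * Illustrative example of the regrouping done below (event names are made
 * up): a user group such as "{instructions,uncore_imc_0/event=0x1/}" mixes a
 * core PMU and an uncore PMU, so the sort and the leader fix-up split it into
 * per-PMU groups and __parse_events() later prints the "events were regrouped
 * to match PMUs" warning.
 */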
static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, force_grouped_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false;
	int orig_num_leaders = 0, num_leaders = 0;
	int ret;
	struct evsel *force_grouped_leader = NULL;
	bool last_event_was_forced_leader = false;

	/*
	 * Compute index to insert ungrouped events at. Place them where the
	 * first ungrouped event appears.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		ret = evsel__compute_group_pmu_name(pos, list);
		if (ret)
			return ret;

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect when
		 * the user order is modified.
		 */
		pos->core.idx = idx++;

		/*
		 * Remember an index to sort all forced grouped events
		 * together to. Use the group leader as some events
		 * must appear first within the group.
		 */
		if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
			force_grouped_idx = pos_leader->core.idx;
	}

	/* Sort events. */
	list_sort(&force_grouped_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = pos->group_pmu_name;
		const char *cur_leader_pmu_name;
		bool pos_force_grouped = force_grouped_idx != -1 &&
					 arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader respecting the given groupings and that
		 * groups can't span PMUs.
		 */
		if (!cur_leader) {
			cur_leader = pos;
			cur_leaders_grp = &pos->core;
			if (pos_force_grouped)
				force_grouped_leader = pos;
		}

		cur_leader_pmu_name = cur_leader->group_pmu_name;
		if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* PMU changed so the group/leader must change. */
			cur_leader = pos;
			cur_leaders_grp = pos->core.leader;
			if (pos_force_grouped && force_grouped_leader == NULL)
				force_grouped_leader = pos;
		} else if (cur_leaders_grp != pos->core.leader) {
			bool split_even_if_last_leader_was_forced = true;

			/*
			 * Event is for a different group. If the last event was
			 * the forced group leader then subsequent group events
			 * and forced events should be in the same group. If
			 * there are no other forced group events then the
			 * forced group leader wasn't really being forced into a
			 * group, it just set arch_evsel__must_be_in_group, and
			 * we don't want the group to split here.
			 */
			if (force_grouped_idx != -1 && last_event_was_forced_leader) {
				struct evsel *pos2 = pos;
				/*
				 * Search the whole list as the group leaders
				 * aren't currently valid.
				 */
				list_for_each_entry_continue(pos2, list, core.node) {
					if (pos->core.leader == pos2->core.leader &&
					    arch_evsel__must_be_in_group(pos2)) {
						split_even_if_last_leader_was_forced = false;
						break;
					}
				}
			}
			if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
				if (pos_force_grouped) {
					if (force_grouped_leader) {
						cur_leader = force_grouped_leader;
						cur_leaders_grp = force_grouped_leader->core.leader;
					} else {
						cur_leader = force_grouped_leader = pos;
						cur_leaders_grp = &pos->core;
					}
				} else {
					cur_leader = pos;
					cur_leaders_grp = pos->core.leader;
				}
			}
		}
		if (pos_leader != cur_leader) {
			/* The leader changed so update it. */
			evsel__set_leader(pos, cur_leader);
		}
		last_event_was_forced_leader = (force_grouped_leader == pos);
	}
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}

int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
		   struct parse_events_error *err, bool fake_pmu,
		   bool warn_if_reordered, bool fake_tp)
{
	struct parse_events_state parse_state = {
		.list = LIST_HEAD_INIT(parse_state.list),
		.idx = evlist->core.nr_entries,
		.error = err,
		.stoken = PE_START_EVENTS,
		.fake_pmu = fake_pmu,
		.fake_tp = fake_tp,
		.pmu_filter = pmu_filter,
		.match_legacy_cache_terms = true,
	};
	int ret, ret2;

	ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
	if (ret2 < 0)
		return ret;

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
		pr_warning("WARNING: events were regrouped to match PMUs\n");

		if (verbose > 0) {
			struct strbuf sb = STRBUF_INIT;

			evlist__uniquify_evsel_names(evlist, &stat_config);
			evlist__format_evsels(evlist, &sb, 2048);
			pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
			strbuf_release(&sb);
		}
	}
	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are 2 users - builtin-record and builtin-test objects.
	 * Both call evlist__delete in case of error, so we don't
	 * need to bother.
	 */
	return ret;
}

int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	parse_events_error__exit(&err);
	return ret;
}

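/*
 * Typical caller-side error handling around the parse entry points above
 * (sketch only; it mirrors parse_event() and parse_events_option()):
 *
 *	struct parse_events_error err;
 *
 *	parse_events_error__init(&err);
 *	if (parse_events(evlist, "cycles,faults", &err))
 *		parse_events_error__print(&err, "cycles,faults");
 *	parse_events_error__exit(&err);
 */
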
struct parse_events_error_entry {
	/** @list: The list the error is part of. */
	struct list_head list;
	/** @idx: index in the parsed string */
	int idx;
	/** @str: string to display at the index */
	char *str;
	/** @help: optional help string */
	char *help;
};

void parse_events_error__init(struct parse_events_error *err)
{
	INIT_LIST_HEAD(&err->list);
}

void parse_events_error__exit(struct parse_events_error *err)
{
	struct parse_events_error_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &err->list, list) {
		zfree(&pos->str);
		zfree(&pos->help);
		list_del_init(&pos->list);
		free(pos);
	}
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	struct parse_events_error_entry *entry;

	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;

	entry = zalloc(sizeof(*entry));
	if (!entry) {
		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
			str, help ?: "<no help>");
		goto out_free;
	}
	entry->idx = idx;
	entry->str = str;
	entry->help = help;
	list_add(&entry->list, &err->list);
	return;
out_free:
	free(str);
	free(help);
}

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;
	if (err_str) {
		/* -2 for extra '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent, we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}

void parse_events_error__print(const struct parse_events_error *err,
			       const char *event)
{
	struct parse_events_error_entry *pos;
	bool first = true;

	list_for_each_entry(pos, &err->list, list) {
		if (!first)
			fputs("\n", stderr);
		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
		first = false;
	}
}

/*
 * In the list of errors err, do any of the error strings (str) contain the
 * given needle string?
 */
bool parse_events_error__contains(const struct parse_events_error *err,
				  const char *needle)
{
	struct parse_events_error_entry *pos;

	list_for_each_entry(pos, &err->list, list) {
		if (strstr(pos->str, needle) != NULL)
			return true;
	}
	return false;
}

#undef MAX_WIDTH

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct parse_events_option_args *args = opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
			     /*fake_pmu=*/false, /*warn_if_reordered=*/true,
			     /*fake_tp=*/false);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct parse_events_option_args *args = opt->value;
	int ret;

	if (*args->evlistp == NULL) {
		*args->evlistp = evlist__new();

		if (*args->evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}
	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*args->evlistp);
		*args->evlistp = NULL;
	}

	return ret;
}

static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return when list_empty, give func a chance to report
	 * error when it found last == NULL.
	 *
	 * So no need to WARN here, let *func do this.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		if (!last)
			return 0;

		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}

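/*
 * Filter routing example (illustrative): for
 * "-e sched:sched_switch --filter 'prev_pid == 0'" set_filter() below appends
 * a tracepoint filter. A filter string that mentions "uid" fails
 * is_possible_tp_filter() and is handed to perf_bpf_filter__parse() instead,
 * as are filters on non-tracepoint events without address filter support.
 */
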
/* Will a tracepoint filter work for str or should a BPF filter be used? */
static bool is_possible_tp_filter(const char *str)
{
	return strstr(str, "uid") == NULL;
}

static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	pmu = evsel__find_pmu(evsel);
	if (pmu) {
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);
	}
	if (!nr_addr_filters)
		return perf_bpf_filter__parse(&evsel->bpf_filters, str);

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}

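/*
 * parse_uid_filter() below builds a "uid == N" string and pushes it through
 * parse_filter(). Because is_possible_tp_filter() rejects anything containing
 * "uid", the filter ends up as a BPF filter on the most recently parsed
 * event(s).
 */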
int parse_uid_filter(struct evlist *evlist, uid_t uid)
{
	struct option opt = {
		.value = &evlist,
	};
	char buf[128];
	int ret;

	snprintf(buf, sizeof(buf), "uid == %d", uid);
	ret = parse_filter(&opt, buf, /*unset=*/0);
	if (ret) {
		if (use_browser >= 1) {
			/*
			 * Use ui__warning so a pop up appears above the
			 * underlying BPF error message.
			 */
			ui__warning("Failed to add UID filtering that uses BPF filtering.\n");
		} else {
			fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n");
		}
	}
	return ret;
}

static int add_exclude_perf_filter(struct evsel *evsel,
				   const void *arg __maybe_unused)
{
	char new_filter[64];

	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"--exclude-perf option should follow a -e tracepoint option\n");
		return -1;
	}

	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());

	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}

int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

static int new_term(struct parse_events_term **_term,
		    struct parse_events_term *temp,
		    char *str, u64 num)
{
	struct parse_events_term *term;

	term = malloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	*term = *temp;
	INIT_LIST_HEAD(&term->list);
	term->weak = false;

	switch (term->type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}

int parse_events_term__num(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   const char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config = config ? : strdup(parse_events__term_type_str(type_term)),
		.no_value = no_value,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, /*str=*/NULL, num);
}

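/*
 * Sketch of how the grammar builds a numeric term such as "config=0x10"
 * (illustrative; the real callers are in parse-events.y and pass source
 * locations instead of NULL):
 *
 *	struct parse_events_term *term;
 *
 *	if (!parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_CONFIG,
 *				    NULL, 0x10, false, NULL, NULL))
 *		parse_events_term__delete(term);
 */
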
int parse_events_term__str(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config = config,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, /*num=*/0);
}

int parse_events_term__term(struct parse_events_term **term,
			    enum parse_events__term_type term_lhs,
			    enum parse_events__term_type term_rhs,
			    void *loc_term, void *loc_val)
{
	return parse_events_term__str(term, term_lhs, NULL,
				      strdup(parse_events__term_type_str(term_rhs)),
				      loc_term, loc_val);
}

int parse_events_term__clone(struct parse_events_term **new,
			     const struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = *term;

	temp.used = false;
	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, /*str=*/NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str) {
		zfree(&temp.config);
		return -ENOMEM;
	}
	return new_term(new, &temp, str, /*num=*/0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &src->terms, list) {
		struct parse_events_term *n;
		int ret;

		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;

		list_add_tail(&n->list, &dest->terms);
	}
	return 0;
}

void parse_events_terms__init(struct parse_events_terms *terms)
{
	INIT_LIST_HEAD(&terms->terms);
}

void parse_events_terms__exit(struct parse_events_terms *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, &terms->terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct parse_events_terms *terms)
{
	if (!terms)
		return;
	parse_events_terms__exit(terms);
	free(terms);
}

int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
	struct parse_events_term *term;
	bool first = true;

	if (!terms)
		return 0;

	list_for_each_entry(term, &terms->terms, list) {
		int ret;

		if (!first) {
			ret = strbuf_addch(sb, ',');
			if (ret < 0)
				return ret;
		}
		first = false;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
			if (term->no_value) {
				assert(term->val.num == 1);
				ret = strbuf_addf(sb, "%s", term->config);
			} else
				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
		else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
			if (term->config) {
				ret = strbuf_addf(sb, "%s=", term->config);
				if (ret < 0)
					return ret;
			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
				ret = strbuf_addf(sb, "%s=",
						  parse_events__term_type_str(term->type_term));
				if (ret < 0)
					return ret;
			}
			assert(!term->no_value);
			ret = strbuf_addf(sb, "%s", term->val.str);
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}

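/*
 * Example of the round trip above (illustrative): a term list parsed from
 * "event=0x3c,umask=0x1,name=foo" is rendered by
 * parse_events_terms__to_strbuf() as "event=0x3c,umask=0x1,name=foo" again,
 * with numeric values printed in hex.
 */
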
static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = parse_events__term_type_str(i);

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string containing the valid config terms of an event.
 * @additional_terms: For terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}
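/*
 * Example of the error string built above (illustrative; the exact term list
 * depends on the build): parse_events_formats_error_string(NULL) returns
 * something like "valid terms: config,config1,..." and the caller is
 * responsible for freeing it.
 */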