// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <api/io_dir.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
#include "tracepoint.h"

#define MAX_NAME_LEN 100

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest);

const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias = "",
	},
};

const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias = "",
	},
	[PERF_COUNT_SW_BPF_OUTPUT] = {
		.symbol = "bpf-output",
		.alias = "",
	},
	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
		.symbol = "cgroup-switches",
		.alias = "",
	},
};

const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";

	case PERF_TYPE_SOFTWARE:
		return "software";

	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";

	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";

	default:
		break;
	}

	return "unknown";
}

static char *get_config_str(const struct parse_events_terms *head_terms,
			    enum parse_events__term_type type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms)
{
	struct parse_events_term *term;
	struct perf_cpu_map *cpus = NULL;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list) {
		if (term->type_term == PARSE_EVENTS__TERM_TYPE_CPU) {
			struct perf_cpu_map *cpu = perf_cpu_map__new_int(term->val.num);

			perf_cpu_map__merge(&cpus, cpu);
			perf_cpu_map__put(cpu);
		}
	}

	return cpus;
}

/**
 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
 *           matches the raw's string value. If the string value matches an
 *           event then change the term to be an event, if not then change it
 *           to be a config term. For example, "read" may be an event of the
 *           PMU or a raw hex encoding of 0xead. The fix-up is done late so the
 *           PMU of the event can be determined and we don't need to scan all
 *           PMUs ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &config_terms->terms, list) {
		u64 num;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
			continue;

		if (perf_pmu__have_event(pmu, term->val.str)) {
			zfree(&term->config);
			term->config = term->val.str;
			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->val.num = 1;
			term->no_value = true;
			continue;
		}

		zfree(&term->config);
		term->config = strdup("config");
		errno = 0;
		num = strtoull(term->val.str + 1, NULL, 16);
		assert(errno == 0);
		free(term->val.str);
		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
		term->val.num = num;
		term->no_value = false;
	}
}

static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, bool auto_merge_stats,
	    struct perf_cpu_map *cpu_list, u64 alternate_hw_config)
{
	struct evsel *evsel;
	bool is_pmu_core;
	struct perf_cpu_map *cpus;

	if (pmu) {
		is_pmu_core = pmu->is_core;
		cpus = perf_cpu_map__get(perf_cpu_map__is_empty(cpu_list) ?
					 pmu->cpus : cpu_list);
		perf_pmu__warn_invalid_formats(pmu);
		if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
			perf_pmu__warn_invalid_config(pmu, attr->config, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
			perf_pmu__warn_invalid_config(pmu, attr->config1, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
			perf_pmu__warn_invalid_config(pmu, attr->config2, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
			perf_pmu__warn_invalid_config(pmu, attr->config3, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
		}
	} else {
		is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
			       attr->type == PERF_TYPE_HW_CACHE);
		if (perf_cpu_map__is_empty(cpu_list))
			cpus = is_pmu_core ? perf_cpu_map__new_online_cpus() : NULL;
		else
			cpus = perf_cpu_map__get(cpu_list);
	}
	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel) {
		perf_cpu_map__put(cpus);
		return NULL;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.own_cpus = perf_cpu_map__get(cpus);
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->core.is_pmu_core = is_pmu_core;
	evsel->auto_merge_stats = auto_merge_stats;
	evsel->pmu = pmu;
	evsel->alternate_hw_config = alternate_hw_config;

	if (name)
		evsel->name = strdup(name);

	if (metric_id)
		evsel->metric_id = strdup(metric_id);

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	return evsel;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
			   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms,
		     u64 alternate_hw_config)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
			   alternate_hw_config) ? 0 : -ENOMEM;
}

/**
 * parse_aliases - search names for entries beginning or equalling str ignoring
 *                 case. If multiple entries in names match str then the
 *                 longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
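 *
 * For example, "L1-dcache-load-misses" begins with the "L1-dcache" name in
 * the evsel__hw_cache table, so that entry's index is returned and *longest
 * is set to strlen("L1-dcache").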
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}

/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
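 *
 * A NULL parse_state->pmu_filter means no filtering, otherwise only the PMU
 * whose name matches the filter is considered.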
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				bool auto_merge_stats, u64 alternate_hw_config);

int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
			   struct parse_events_state *parse_state,
			   struct parse_events_terms *parsed_terms)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;
	const char *config_name = get_config_name(parsed_terms);
	const char *metric_id = get_config_metric_id(parsed_terms);
	struct perf_cpu_map *cpus = get_config_cpu(parsed_terms);
	int ret = 0;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		LIST_HEAD(config_terms);
		struct perf_event_attr attr;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (perf_pmu__have_event(pmu, name)) {
			/*
			 * The PMU has the event so add as not a legacy cache
			 * event.
			 */
			ret = parse_events_add_pmu(parse_state, list, pmu,
						   parsed_terms,
						   perf_pmu__auto_merge_stats(pmu),
						   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
			if (ret)
				goto out_err;
			continue;
		}

		if (!pmu->is_core) {
			/* Legacy cache events are only supported by core PMUs. */
			continue;
		}

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HW_CACHE;

		ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
		if (ret)
			return ret;

		found_supported = true;

		if (parsed_terms) {
			if (config_attr(&attr, parsed_terms, parse_state->error,
					config_term_common)) {
				ret = -EINVAL;
				goto out_err;
			}
			if (get_config_terms(parsed_terms, &config_terms)) {
				ret = -ENOMEM;
				goto out_err;
			}
		}

		if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
				metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
				cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
			ret = -ENOMEM;

		free_config_terms(&config_terms);
		if (ret)
			goto out_err;
	}
out_err:
	perf_cpu_map__put(cpus);
	return found_supported ? 0 : -EINVAL;
}

static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name, int column)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get error directly from syscall errno ( > 0),
	 * or from encoded pointer's error ( < 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, column, strdup(str), strdup(help));
}

static int add_tracepoint(struct parse_events_state *parse_state,
			  struct list_head *list,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
					       !parse_state->fake_tp);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
				      struct list_head *list,
				      const char *sys_name, const char *evt_name,
				      struct parse_events_error *err,
				      struct parse_events_terms *head_config, YYLTYPE *loc)
{
	char *evt_path;
	struct io_dirent64 *evt_ent;
	struct io_dir evt_dir;
	int ret = 0, found = 0;

	evt_path = get_events_file(sys_name);
	if (!evt_path) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}
	io_dir__init(&evt_dir, open(evt_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	if (evt_dir.dirfd < 0) {
		put_events_file(evt_path);
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (evt_ent = io_dir__readdir(&evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		found++;

		ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
				     err, head_config, loc);
	}

	if (!found) {
		tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
		ret = -1;
	}

	put_events_file(evt_path);
	close(evt_dir.dirfd);
	return ret;
}

static int add_tracepoint_event(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys_name, const char *evt_name,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, YYLTYPE *loc)
{
	return strpbrk(evt_name, "*?") ?
	       add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
					  err, head_config, loc) :
	       add_tracepoint(parse_state, list, sys_name, evt_name,
			      err, head_config, loc);
}

static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
				    struct list_head *list,
				    const char *sys_name, const char *evt_name,
				    struct parse_events_error *err,
				    struct parse_events_terms *head_config, YYLTYPE *loc)
{
	struct io_dirent64 *events_ent;
	struct io_dir events_dir;
	int ret = 0;
	char *events_dir_path = get_tracing_file("events");

	if (!events_dir_path) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}
	io_dir__init(&events_dir, open(events_dir_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	put_events_file(events_dir_path);
	if (events_dir.dirfd < 0) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (events_ent = io_dir__readdir(&events_dir))) {
		if (!strcmp(events_ent->d_name, ".")
		    || !strcmp(events_ent->d_name, "..")
		    || !strcmp(events_ent->d_name, "enable")
		    || !strcmp(events_ent->d_name, "header_event")
		    || !strcmp(events_ent->d_name, "header_page"))
			continue;

		if (!strglobmatch(events_ent->d_name, sys_name))
			continue;

		ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
					   evt_name, err, head_config, loc);
	}
	close(events_dir.dirfd);
	return ret;
}

size_t default_breakpoint_len(void)
{
#if defined(__i386__)
	static int len;

	if (len == 0) {
		struct perf_env env = {};

		perf_env__init(&env);
		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
		perf_env__exit(&env);
	}
	return len;
#elif defined(__aarch64__)
	return 4;
#else
	return sizeof(long);
#endif
}

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
				struct list_head *list,
				u64 addr, char *type, u64 len,
				struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = default_breakpoint_len();
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);

	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
			 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  enum parse_events__term_val_type type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					   type == PARSE_EVENTS__TERM_TYPE_NUM
					   ? strdup("expected numeric value")
					   : strdup("expected string value"),
					   NULL);
	}
	return -EINVAL;
}

static bool config_term_shrinked;

const char *parse_events__term_type_str(enum parse_events__term_type term_type)
{
	/*
	 * Update according to parse-events.l
	 */
	static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
		[PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
		[PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
		[PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
		[PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
		[PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
		[PARSE_EVENTS__TERM_TYPE_NAME] = "name",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
		[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
		[PARSE_EVENTS__TERM_TYPE_TIME] = "time",
		[PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
		[PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
		[PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
		[PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
		[PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
		[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
		[PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
		[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
		[PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
		[PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
		[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
		[PARSE_EVENTS__TERM_TYPE_AUX_ACTION] = "aux-action",
		[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
		[PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
		[PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
		[PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
		[PARSE_EVENTS__TERM_TYPE_CPU] = "cpu",
	};
	if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
		return "unknown term";

	return config_term_names[term_type];
}

static bool
config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					   strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
		return true;
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     parse_events__term_type_str(term_type)) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)						\
do {									\
	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
		return -EINVAL;						\
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(err, term->err_val,
						   strdup("invalid branch sample type"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_RAW:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(err, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CPU:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
			parse_events_error__handle(err, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		parse_events_error__handle(err, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after basic checking so
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If the availability were checked at the entry of this function,
	 * the user would see "'<sysfs term>' is not usable in 'perf stat'"
	 * when an invalid config term is provided for a legacy event
	 * (for example, instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, err))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_error *err)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * Rewrite the PMU event to a legacy cache one unless the PMU
		 * doesn't support legacy cache events or the event is present
		 * within the PMU.
		 */
		if (perf_pmu__supports_legacy_cache(pmu) &&
		    !perf_pmu__have_event(pmu, term->config)) {
			attr->type = PERF_TYPE_HW_CACHE;
			return parse_events__decode_legacy_cache(term->config, pmu->type,
								 &attr->config);
		} else {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
		}
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * If the PMU has a sysfs or json event prefer it over
		 * legacy. ARM requires this.
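		 * For example, a "cycles" event provided by the PMU is used in
		 * preference to the legacy PERF_COUNT_HW_CPU_CYCLES encoding.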
		 */
		if (perf_pmu__have_event(pmu, term->config)) {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
			term->alternate_hw_config = true;
		} else {
			attr->type = PERF_TYPE_HARDWARE;
			attr->config = term->val.num;
			if (perf_pmus__supports_extended_type())
				attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
		}
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, err);
}

static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
						   strdup(parse_events__term_type_str(term->type_term)),
						   strdup("valid terms: call-graph,stack-size\n"));
		}
		return -EINVAL;
	}

	return 0;
}

static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &head->terms, list)
		if (config_term(attr, term, err))
			return -EINVAL;

	return 0;
}

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
			ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_USER:
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}
	return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;

	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(parse_state, list, sys, event,
						err, head_config, loc);
	else
		return add_tracepoint_event(parse_state, list, sys, event,
					    err, head_config, loc);
}

static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				      struct list_head *list,
				      struct perf_pmu *pmu, u32 type, u32 extended_type,
				      u64 config, const struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	struct perf_cpu_map *cpus;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	cpus = get_config_cpu(head_config);
	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
			  metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
			  cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
	perf_cpu_map__put(cpus);
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     const struct parse_events_terms *head_config,
			     bool wildcard)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;

	/* Wildcards on numeric values are only supported by core PMUs. */
	if (wildcard && perf_pmus__supports_extended_type()) {
		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			int ret;

			found_supported = true;
			if (parse_events__filter_pmu(parse_state, pmu))
				continue;

			ret = __parse_events_add_numeric(parse_state, list, pmu,
							 type, pmu->type,
							 config, head_config);
			if (ret)
				return ret;
		}
		if (found_supported)
			return 0;
	}
	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
					  type, /*extended_type=*/0, config, head_config);
}

static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				bool auto_merge_stats, u64 alternate_hw_config)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);
	struct parse_events_terms parsed_terms;
	bool alias_rewrote_terms = false;
	struct perf_cpu_map *term_cpu = NULL;

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		if (pmu->selectable && const_parsed_terms &&
		    list_empty(&const_parsed_terms->terms)) {
			strbuf_addf(&sb, "%s//", pmu->name);
		} else {
			strbuf_addf(&sb, "%s/", pmu->name);
			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
			strbuf_addch(&sb, '/');
		}
		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
		strbuf_release(&sb);
	}

	memset(&attr, 0, sizeof(attr));
	if (pmu->perf_event_attr_init_default)
		pmu->perf_event_attr_init_default(pmu, &attr);

	attr.type = pmu->type;

	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, auto_merge_stats,
				    /*cpu_list=*/NULL, alternate_hw_config);
		return evsel ? 0 : -ENOMEM;
	}

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}
	fix_raw(&parsed_terms, pmu);

	/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
	if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	/* Look for event names in the terms and rewrite into format based terms. */
	if (perf_pmu__check_alias(pmu, &parsed_terms,
				  &info, &alias_rewrote_terms,
				  &alternate_hw_config, err)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		parse_events_terms__to_strbuf(&parsed_terms, &sb);
		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
		strbuf_release(&sb);
	}

	/* Configure attr/terms again if an alias was expanded. */
	if (alias_rewrote_terms &&
	    config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (get_config_terms(&parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->perf_event_attr_init_default &&
	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/* Skip configuring hard coded terms that were applied by config_attr. */
	if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
			     parse_state->error)) {
		free_config_terms(&config_terms);
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	term_cpu = get_config_cpu(&parsed_terms);
	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(&parsed_terms),
			    get_config_metric_id(&parsed_terms), pmu,
			    &config_terms, auto_merge_stats, term_cpu, alternate_hw_config);
	perf_cpu_map__put(term_cpu);
	if (!evsel) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->percore = config_term_percore(&evsel->config_terms);

	parse_events_terms__exit(&parsed_terms);
	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	evsel->retirement_latency.mean = info.retirement_latency_mean;
	evsel->retirement_latency.min = info.retirement_latency_min;
	evsel->retirement_latency.max = info.retirement_latency_max;

	return 0;
}

int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       const char *event_name, u64 hw_config,
			       const struct parse_events_terms *const_parsed_terms,
			       struct list_head **listp, void *loc_)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct perf_pmu *pmu = NULL;
	YYLTYPE *loc = loc_;
	int ok = 0;
	const char *config;
	struct parse_events_terms parsed_terms;

	*listp = NULL;

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}

	config = strdup(event_name);
	if (!config)
		goto out_err;

	if (parse_events_term__num(&term,
				   PARSE_EVENTS__TERM_TYPE_USER,
				   config, /*num=*/1,
				   /*novalue=*/true,
				   loc, /*loc_val=*/NULL) < 0) {
		zfree(&config);
		goto out_err;
	}
	list_add_tail(&term->list, &parsed_terms.terms);

	/* Add it for all PMUs that support the alias */
	list = malloc(sizeof(struct list_head));
	if (!list)
		goto out_err;

	INIT_LIST_HEAD(list);

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		bool auto_merge_stats;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!perf_pmu__have_event(pmu, event_name))
			continue;

		auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
		if (!parse_events_add_pmu(parse_state, list, pmu,
					  &parsed_terms, auto_merge_stats, hw_config)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
	}

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
					  /*auto_merge_stats=*/true, hw_config)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
	}

out_err:
	parse_events_terms__exit(&parsed_terms);
	if (ok)
		*listp = list;
	else
		free(list);

	return ok ? 0 : -1;
}

int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
					  const char *event_or_pmu,
					  const struct parse_events_terms *const_parsed_terms,
					  struct list_head **listp,
					  void *loc_)
{
	YYLTYPE *loc = loc_;
	struct perf_pmu *pmu;
	int ok = 0;
	char *help;

	*listp = malloc(sizeof(**listp));
	if (!*listp)
		return -ENOMEM;

	INIT_LIST_HEAD(*listp);

	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
	pmu = perf_pmus__find(event_or_pmu);
	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
					 /*auto_merge_stats=*/false,
					 /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
		return 0;

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
					  const_parsed_terms,
					  /*auto_merge_stats=*/false,
					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
			return 0;
	}

	pmu = NULL;
	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!parse_events__filter_pmu(parse_state, pmu) &&
		    perf_pmu__wildcard_match(pmu, event_or_pmu)) {
			bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);

			if (!parse_events_add_pmu(parse_state, *listp, pmu,
						  const_parsed_terms,
						  auto_merge_stats,
						  /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
				ok++;
				parse_state->wild_card_pmus = true;
			}
		}
	}
	if (ok)
		return 0;

	/* Failure to add, assume event_or_pmu is an event name. */
	zfree(listp);
	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
					const_parsed_terms, listp, loc))
		return 0;

	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
		help = NULL;
	parse_events_error__handle(parse_state->error, loc->first_column,
				   strdup("Bad event or PMU"),
				   help);
	zfree(listp);
	return -EINVAL;
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	leader = list_first_entry(list, struct evsel, core.node);
	__perf_evlist__set_leader(list, &leader->core);
	zfree(&leader->group_name);
	leader->group_name = name;
}

static int parse_events__modifier_list(struct parse_events_state *parse_state,
				       YYLTYPE *loc,
				       struct list_head *list,
				       struct parse_events_modifier mod,
				       bool group)
{
	struct evsel *evsel;

	if (!group && mod.weak) {
		parse_events_error__handle(parse_state->error, loc->first_column,
					   strdup("Weak modifier is for use with groups"), NULL);
		return -EINVAL;
	}

	__evlist__for_each_entry(list, evsel) {
		/* Translate modifiers into the equivalent evsel excludes. */
		int eu = group ? evsel->core.attr.exclude_user : 0;
		int ek = group ? evsel->core.attr.exclude_kernel : 0;
		int eh = group ? evsel->core.attr.exclude_hv : 0;
		int eH = group ? evsel->core.attr.exclude_host : 0;
		int eG = group ? evsel->core.attr.exclude_guest : 0;
		int exclude = eu | ek | eh;
		int exclude_GH = group ? evsel->exclude_GH : 0;

		if (mod.user) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			if (!exclude_GH && !perf_guest && exclude_GH_default)
				eG = 1;
			eu = 0;
		}
		if (mod.kernel) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		}
		if (mod.hypervisor) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		}
		if (mod.guest) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		}
		if (mod.host) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		}
		evsel->core.attr.exclude_user = eu;
		evsel->core.attr.exclude_kernel = ek;
		evsel->core.attr.exclude_hv = eh;
		evsel->core.attr.exclude_host = eH;
		evsel->core.attr.exclude_guest = eG;
		evsel->exclude_GH = exclude_GH;

		/* Simple modifiers copied to the evsel. */
		if (mod.precise) {
			u8 precise = evsel->core.attr.precise_ip + mod.precise;
			/*
			 * precise ip:
			 *
			 *  0 - SAMPLE_IP can have arbitrary skid
			 *  1 - SAMPLE_IP must have constant skid
			 *  2 - SAMPLE_IP requested to have 0 skid
			 *  3 - SAMPLE_IP must have 0 skid
			 *
			 * See also PERF_RECORD_MISC_EXACT_IP
			 */
			if (precise > 3) {
				char *help;

				if (asprintf(&help,
					     "Maximum combined precise value is 3, adding precision to \"%s\"",
					     evsel__name(evsel)) > 0) {
					parse_events_error__handle(parse_state->error,
								   loc->first_column,
								   help, NULL);
				}
				return -EINVAL;
			}
			evsel->core.attr.precise_ip = precise;
		}
		if (mod.precise_max)
			evsel->precise_max = 1;
		if (mod.non_idle)
			evsel->core.attr.exclude_idle = 1;
		if (mod.sample_read)
			evsel->sample_read = 1;
		if (mod.pinned && evsel__is_group_leader(evsel))
			evsel->core.attr.pinned = 1;
		if (mod.exclusive && evsel__is_group_leader(evsel))
			evsel->core.attr.exclusive = 1;
		if (mod.weak)
			evsel->weak_group = true;
		if (mod.bpf)
			evsel->bpf_counter = true;
		if (mod.retire_lat)
			evsel->retire_lat = true;
	}
	return 0;
}

int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}

int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}

int parse_events__set_default_name(struct list_head *list, char *name)
{
	struct evsel *evsel;
	bool used_name = false;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name) {
			evsel->name = used_name ? strdup(name) : name;
			used_name = true;
			if (!evsel->name)
				return -ENOMEM;
		}
	}
	if (!used_name)
		free(name);
	return 0;
}

static int parse_events__scanner(const char *str,
				 FILE *input,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	if (str)
		buffer = parse_events__scan_string(str, scanner);
	else
		parse_events_set_in(input, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	if (str) {
		parse_events__flush_buffer(buffer, scanner);
		parse_events__delete_buffer(buffer, scanner);
	}
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * parse event config string, return a list of event terms.
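 *
 * For example, the string "call-graph=fp,period=1000" yields two terms that
 * are appended to the passed terms list.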

/*
 * Parse an event config string and return a list of event terms.
 */
int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
{
	struct parse_events_state parse_state = {
		.terms  = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, input, &parse_state);
	if (!ret)
		list_splice(&parse_state.terms->terms, &terms->terms);

	zfree(&parse_state.terms);
	return ret;
}

static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used
		 * because the list isn't yet sorted, so events in the same
		 * group aren't necessarily adjacent.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Record computed name. */
	evsel->group_pmu_name = strdup(group_pmu_name);
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}
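
/*
 * For example, a group such as "{faults,uncore_imc_0/cas_count_read/}" (the
 * uncore PMU name is illustrative) keeps the software "faults" event in the
 * group by giving it the uncore PMU's name as its group_pmu_name.
 */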

__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}

static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *force_grouped_idx = _fg_idx;
	int lhs_sort_idx, rhs_sort_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;

	/*
	 * Get the indexes of the 2 events to sort. If the events are in groups
	 * then the leader's index is used; otherwise the event's own index is
	 * used. An index may be forced for events that must be in the same
	 * group, namely Intel topdown events.
	 */
	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
		lhs_sort_idx = *force_grouped_idx;
	} else {
		bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;

		lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
	}
	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
		rhs_sort_idx = *force_grouped_idx;
	} else {
		bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;

		rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
	}

	/* If the indices differ then respect the insertion order. */
	if (lhs_sort_idx != rhs_sort_idx)
		return lhs_sort_idx - rhs_sort_idx;

	/*
	 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should
	 * be in the same group. Events in the same group need to be ordered by
	 * their grouping PMU name as the group will be broken to ensure only
	 * events on the same PMU are programmed together.
	 *
	 * With forcing, lhs_sort_idx == rhs_sort_idx shows that one or both
	 * events are being forced to be at force_grouped_idx. If only one event
	 * is being forced then the other event is the group leader of the group
	 * we're trying to force the event into. Ensure for the force grouped
	 * case that the PMU name ordering is also respected.
	 */
	lhs_pmu_name = lhs->group_pmu_name;
	rhs_pmu_name = rhs->group_pmu_name;
	ret = strcmp(lhs_pmu_name, rhs_pmu_name);
	if (ret)
		return ret;

	/*
	 * Architecture specific sorting, by default sort events in the same
	 * group with the same PMU by their insertion index. On Intel topdown
	 * constraints must be adhered to - slots first, etc.
	 */
	return arch_evlist__cmp(lhs, rhs);
}

static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, force_grouped_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false;
	int orig_num_leaders = 0, num_leaders = 0;
	int ret;
	struct evsel *force_grouped_leader = NULL;
	bool last_event_was_forced_leader = false;

	/*
	 * Compute index to insert ungrouped events at. Place them where the
	 * first ungrouped event appears.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		ret = evsel__compute_group_pmu_name(pos, list);
		if (ret)
			return ret;

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect when
		 * the user order is modified.
		 */
		pos->core.idx = idx++;

		/*
		 * Remember an index to sort all forced grouped events
		 * together to. Use the group leader as some events
		 * must appear first within the group.
		 */
		if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
			force_grouped_idx = pos_leader->core.idx;
	}
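
	/*
	 * For example, on Intel systems the topdown events are expected to be
	 * grouped with "slots" first (e.g. "{slots,topdown-retiring}"); the
	 * force_grouped_idx computed above keeps such events together when the
	 * list is sorted below.
	 */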

	/* Sort events. */
	list_sort(&force_grouped_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = pos->group_pmu_name;
		const char *cur_leader_pmu_name;
		bool pos_force_grouped = force_grouped_idx != -1 &&
			arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader respecting the given groupings and that
		 * groups can't span PMUs.
		 */
		if (!cur_leader) {
			cur_leader = pos;
			cur_leaders_grp = &pos->core;
			if (pos_force_grouped)
				force_grouped_leader = pos;
		}

		cur_leader_pmu_name = cur_leader->group_pmu_name;
		if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* PMU changed so the group/leader must change. */
			cur_leader = pos;
			cur_leaders_grp = pos->core.leader;
			if (pos_force_grouped && force_grouped_leader == NULL)
				force_grouped_leader = pos;
		} else if (cur_leaders_grp != pos->core.leader) {
			bool split_even_if_last_leader_was_forced = true;

			/*
			 * Event is for a different group. If the last event was
			 * the forced group leader then subsequent group events
			 * and forced events should be in the same group. If
			 * there are no other forced group events then the
			 * forced group leader wasn't really being forced into a
			 * group, it just set arch_evsel__must_be_in_group, and
			 * we don't want the group to split here.
			 */
			if (force_grouped_idx != -1 && last_event_was_forced_leader) {
				struct evsel *pos2 = pos;
				/*
				 * Search the whole list as the group leaders
				 * aren't currently valid.
				 */
				list_for_each_entry_continue(pos2, list, core.node) {
					if (pos->core.leader == pos2->core.leader &&
					    arch_evsel__must_be_in_group(pos2)) {
						split_even_if_last_leader_was_forced = false;
						break;
					}
				}
			}
			if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
				if (pos_force_grouped) {
					if (force_grouped_leader) {
						cur_leader = force_grouped_leader;
						cur_leaders_grp = force_grouped_leader->core.leader;
					} else {
						cur_leader = force_grouped_leader = pos;
						cur_leaders_grp = &pos->core;
					}
				} else {
					cur_leader = pos;
					cur_leaders_grp = pos->core.leader;
				}
			}
		}
		if (pos_leader != cur_leader) {
			/* The leader changed so update it. */
			evsel__set_leader(pos, cur_leader);
		}
		last_event_was_forced_leader = (force_grouped_leader == pos);
	}
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}
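
/*
 * For example, a command line group that mixes PMUs, such as
 * "{instructions,uncore_imc_0/cas_count_read/}" (PMU name illustrative), is
 * split by the pass above so that each resulting group only contains events
 * of a single PMU; __parse_events() below warns when such regrouping happens.
 */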

int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
		   struct parse_events_error *err, bool fake_pmu,
		   bool warn_if_reordered, bool fake_tp)
{
	struct parse_events_state parse_state = {
		.list	    = LIST_HEAD_INIT(parse_state.list),
		.idx	    = evlist->core.nr_entries,
		.error	    = err,
		.stoken	    = PE_START_EVENTS,
		.fake_pmu   = fake_pmu,
		.fake_tp    = fake_tp,
		.pmu_filter = pmu_filter,
		.match_legacy_cache_terms = true,
	};
	int ret, ret2;

	ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
	if (ret2 < 0)
		return ret;

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
		pr_warning("WARNING: events were regrouped to match PMUs\n");

		if (verbose > 0) {
			struct strbuf sb = STRBUF_INIT;

			evlist__uniquify_name(evlist);
			evlist__format_evsels(evlist, &sb, 2048);
			pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
			strbuf_release(&sb);
		}
	}
	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are 2 users - builtin-record and builtin-test objects.
	 * Both call evlist__delete in case of error, so we don't
	 * need to bother.
	 */
	return ret;
}

int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	parse_events_error__exit(&err);
	return ret;
}

struct parse_events_error_entry {
	/** @list: The list the error is part of. */
	struct list_head list;
	/** @idx: index in the parsed string */
	int idx;
	/** @str: string to display at the index */
	char *str;
	/** @help: optional help string */
	char *help;
};

void parse_events_error__init(struct parse_events_error *err)
{
	INIT_LIST_HEAD(&err->list);
}

void parse_events_error__exit(struct parse_events_error *err)
{
	struct parse_events_error_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &err->list, list) {
		zfree(&pos->str);
		zfree(&pos->help);
		list_del_init(&pos->list);
		free(pos);
	}
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	struct parse_events_error_entry *entry;

	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;

	entry = zalloc(sizeof(*entry));
	if (!entry) {
		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
			str, help ?: "<no help>");
		goto out_free;
	}
	entry->idx = idx;
	entry->str = str;
	entry->help = help;
	list_add(&entry->list, &err->list);
	return;
out_free:
	free(str);
	free(help);
}

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;

	if (err_str) {
		/* -2 for extra '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent; we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}

void parse_events_error__print(const struct parse_events_error *err,
			       const char *event)
{
	struct parse_events_error_entry *pos;
	bool first = true;

	list_for_each_entry(pos, &err->list, list) {
		if (!first)
			fputs("\n", stderr);
		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
		first = false;
	}
}
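
/*
 * The printing above produces output along the lines of (illustrative):
 *
 *	event syntax error: 'cycles:xyz'
 *	                           \___ parser error
 *
 * with the "\___" marker pointing at the column recorded in the entry's idx.
 */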

/*
 * In the list of errors err, do any of the error strings (str) contain the
 * given needle string?
 */
bool parse_events_error__contains(const struct parse_events_error *err,
				  const char *needle)
{
	struct parse_events_error_entry *pos;

	list_for_each_entry(pos, &err->list, list) {
		if (strstr(pos->str, needle) != NULL)
			return true;
	}
	return false;
}

#undef MAX_WIDTH

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct parse_events_option_args *args = opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
			     /*fake_pmu=*/false, /*warn_if_reordered=*/true,
			     /*fake_tp=*/false);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct parse_events_option_args *args = opt->value;
	int ret;

	if (*args->evlistp == NULL) {
		*args->evlistp = evlist__new();

		if (*args->evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}
	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*args->evlistp);
		*args->evlistp = NULL;
	}

	return ret;
}
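
/*
 * The helpers below apply an operation to the events of the most recent "-e"
 * argument. For example, in "-e sched:sched_switch,sched:sched_wakeup
 * --filter 'prev_pid != 0'" the filter is attached to both tracepoints, since
 * both were added after the last cmdline_group_boundary.
 */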
2509 */ 2510 if (evlist->core.nr_entries > 0) 2511 last = evlist__last(evlist); 2512 2513 do { 2514 err = (*func)(last, arg); 2515 if (err) 2516 return -1; 2517 if (!last) 2518 return 0; 2519 2520 if (last->core.node.prev == &evlist->core.entries) 2521 return 0; 2522 last = list_entry(last->core.node.prev, struct evsel, core.node); 2523 } while (!last->cmdline_group_boundary); 2524 2525 return 0; 2526 } 2527 2528 static int set_filter(struct evsel *evsel, const void *arg) 2529 { 2530 const char *str = arg; 2531 bool found = false; 2532 int nr_addr_filters = 0; 2533 struct perf_pmu *pmu = NULL; 2534 2535 if (evsel == NULL) { 2536 fprintf(stderr, 2537 "--filter option should follow a -e tracepoint or HW tracer option\n"); 2538 return -1; 2539 } 2540 2541 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 2542 if (evsel__append_tp_filter(evsel, str) < 0) { 2543 fprintf(stderr, 2544 "not enough memory to hold filter string\n"); 2545 return -1; 2546 } 2547 2548 return 0; 2549 } 2550 2551 while ((pmu = perf_pmus__scan(pmu)) != NULL) 2552 if (pmu->type == evsel->core.attr.type) { 2553 found = true; 2554 break; 2555 } 2556 2557 if (found) 2558 perf_pmu__scan_file(pmu, "nr_addr_filters", 2559 "%d", &nr_addr_filters); 2560 2561 if (!nr_addr_filters) 2562 return perf_bpf_filter__parse(&evsel->bpf_filters, str); 2563 2564 if (evsel__append_addr_filter(evsel, str) < 0) { 2565 fprintf(stderr, 2566 "not enough memory to hold filter string\n"); 2567 return -1; 2568 } 2569 2570 return 0; 2571 } 2572 2573 int parse_filter(const struct option *opt, const char *str, 2574 int unset __maybe_unused) 2575 { 2576 struct evlist *evlist = *(struct evlist **)opt->value; 2577 2578 return foreach_evsel_in_last_glob(evlist, set_filter, 2579 (const void *)str); 2580 } 2581 2582 static int add_exclude_perf_filter(struct evsel *evsel, 2583 const void *arg __maybe_unused) 2584 { 2585 char new_filter[64]; 2586 2587 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2588 fprintf(stderr, 2589 "--exclude-perf option should follow a -e tracepoint option\n"); 2590 return -1; 2591 } 2592 2593 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); 2594 2595 if (evsel__append_tp_filter(evsel, new_filter) < 0) { 2596 fprintf(stderr, 2597 "not enough memory to hold filter string\n"); 2598 return -1; 2599 } 2600 2601 return 0; 2602 } 2603 2604 int exclude_perf(const struct option *opt, 2605 const char *arg __maybe_unused, 2606 int unset __maybe_unused) 2607 { 2608 struct evlist *evlist = *(struct evlist **)opt->value; 2609 2610 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, 2611 NULL); 2612 } 2613 2614 int parse_events__is_hardcoded_term(struct parse_events_term *term) 2615 { 2616 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 2617 } 2618 2619 static int new_term(struct parse_events_term **_term, 2620 struct parse_events_term *temp, 2621 char *str, u64 num) 2622 { 2623 struct parse_events_term *term; 2624 2625 term = malloc(sizeof(*term)); 2626 if (!term) 2627 return -ENOMEM; 2628 2629 *term = *temp; 2630 INIT_LIST_HEAD(&term->list); 2631 term->weak = false; 2632 2633 switch (term->type_val) { 2634 case PARSE_EVENTS__TERM_TYPE_NUM: 2635 term->val.num = num; 2636 break; 2637 case PARSE_EVENTS__TERM_TYPE_STR: 2638 term->val.str = str; 2639 break; 2640 default: 2641 free(term); 2642 return -EINVAL; 2643 } 2644 2645 *_term = term; 2646 return 0; 2647 } 2648 2649 int parse_events_term__num(struct parse_events_term **term, 2650 enum parse_events__term_type type_term, 2651 

int parse_events_term__num(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   const char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config	   = config ? : strdup(parse_events__term_type_str(type_term)),
		.no_value  = no_value,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, /*str=*/NULL, num);
}

int parse_events_term__str(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config	   = config,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, /*num=*/0);
}

int parse_events_term__term(struct parse_events_term **term,
			    enum parse_events__term_type term_lhs,
			    enum parse_events__term_type term_rhs,
			    void *loc_term, void *loc_val)
{
	return parse_events_term__str(term, term_lhs, NULL,
				      strdup(parse_events__term_type_str(term_rhs)),
				      loc_term, loc_val);
}

int parse_events_term__clone(struct parse_events_term **new,
			     const struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = *term;

	temp.used = false;
	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, /*str=*/NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str) {
		zfree(&temp.config);
		return -ENOMEM;
	}
	return new_term(new, &temp, str, /*num=*/0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest)
{
	struct parse_events_term *term;

	list_for_each_entry (term, &src->terms, list) {
		struct parse_events_term *n;
		int ret;

		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;

		list_add_tail(&n->list, &dest->terms);
	}
	return 0;
}

void parse_events_terms__init(struct parse_events_terms *terms)
{
	INIT_LIST_HEAD(&terms->terms);
}

void parse_events_terms__exit(struct parse_events_terms *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, &terms->terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct parse_events_terms *terms)
{
	if (!terms)
		return;
	parse_events_terms__exit(terms);
	free(terms);
}
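
/*
 * parse_events_terms__to_strbuf() below renders a term list back into textual
 * form; for example, the two terms config=0x10 and name="foo" (illustrative)
 * become "config=0x10,name=foo", while terms parsed without a value are
 * printed as just their name.
 */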

int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
	struct parse_events_term *term;
	bool first = true;

	if (!terms)
		return 0;

	list_for_each_entry(term, &terms->terms, list) {
		int ret;

		if (!first) {
			ret = strbuf_addch(sb, ',');
			if (ret < 0)
				return ret;
		}
		first = false;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->no_value) {
				assert(term->val.num == 1);
				ret = strbuf_addf(sb, "%s", term->config);
			} else {
				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
			}
		} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
			if (term->config) {
				ret = strbuf_addf(sb, "%s=", term->config);
				if (ret < 0)
					return ret;
			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
				ret = strbuf_addf(sb, "%s=",
						  parse_events__term_type_str(term->type_term));
				if (ret < 0)
					return ret;
			}
			assert(!term->no_value);
			ret = strbuf_addf(sb, "%s", term->val.str);
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = parse_events__term_type_str(i);

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string containing the valid config terms of an event.
 * @additional_terms: For terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}
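
/*
 * The returned string reads something like
 * "valid terms: config,config1,config2,name,period,..." (the exact list
 * depends on config_term_avail()) and is used to build help text for event
 * parsing errors.
 */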