1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/hw_breakpoint.h> 3 #include <linux/err.h> 4 #include <linux/list_sort.h> 5 #include <linux/zalloc.h> 6 #include <dirent.h> 7 #include <errno.h> 8 #include <sys/ioctl.h> 9 #include <sys/param.h> 10 #include "term.h" 11 #include "env.h" 12 #include "evlist.h" 13 #include "evsel.h" 14 #include <subcmd/parse-options.h> 15 #include "parse-events.h" 16 #include "string2.h" 17 #include "strbuf.h" 18 #include "debug.h" 19 #include <api/fs/tracing_path.h> 20 #include <api/io_dir.h> 21 #include <perf/cpumap.h> 22 #include <util/parse-events-bison.h> 23 #include <util/parse-events-flex.h> 24 #include "pmu.h" 25 #include "pmus.h" 26 #include "asm/bug.h" 27 #include "util/parse-branch-options.h" 28 #include "util/evsel_config.h" 29 #include "util/event.h" 30 #include "util/bpf-filter.h" 31 #include "util/stat.h" 32 #include "util/util.h" 33 #include "tracepoint.h" 34 35 #define MAX_NAME_LEN 100 36 37 static int get_config_terms(const struct parse_events_terms *head_config, 38 struct list_head *head_terms); 39 static int parse_events_terms__copy(const struct parse_events_terms *src, 40 struct parse_events_terms *dest); 41 42 const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { 43 [PERF_COUNT_HW_CPU_CYCLES] = { 44 .symbol = "cpu-cycles", 45 .alias = "cycles", 46 }, 47 [PERF_COUNT_HW_INSTRUCTIONS] = { 48 .symbol = "instructions", 49 .alias = "", 50 }, 51 [PERF_COUNT_HW_CACHE_REFERENCES] = { 52 .symbol = "cache-references", 53 .alias = "", 54 }, 55 [PERF_COUNT_HW_CACHE_MISSES] = { 56 .symbol = "cache-misses", 57 .alias = "", 58 }, 59 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 60 .symbol = "branch-instructions", 61 .alias = "branches", 62 }, 63 [PERF_COUNT_HW_BRANCH_MISSES] = { 64 .symbol = "branch-misses", 65 .alias = "", 66 }, 67 [PERF_COUNT_HW_BUS_CYCLES] = { 68 .symbol = "bus-cycles", 69 .alias = "", 70 }, 71 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = { 72 .symbol = "stalled-cycles-frontend", 73 .alias = "idle-cycles-frontend", 74 }, 75 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = { 76 .symbol = "stalled-cycles-backend", 77 .alias = "idle-cycles-backend", 78 }, 79 [PERF_COUNT_HW_REF_CPU_CYCLES] = { 80 .symbol = "ref-cycles", 81 .alias = "", 82 }, 83 }; 84 85 const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { 86 [PERF_COUNT_SW_CPU_CLOCK] = { 87 .symbol = "cpu-clock", 88 .alias = "", 89 }, 90 [PERF_COUNT_SW_TASK_CLOCK] = { 91 .symbol = "task-clock", 92 .alias = "", 93 }, 94 [PERF_COUNT_SW_PAGE_FAULTS] = { 95 .symbol = "page-faults", 96 .alias = "faults", 97 }, 98 [PERF_COUNT_SW_CONTEXT_SWITCHES] = { 99 .symbol = "context-switches", 100 .alias = "cs", 101 }, 102 [PERF_COUNT_SW_CPU_MIGRATIONS] = { 103 .symbol = "cpu-migrations", 104 .alias = "migrations", 105 }, 106 [PERF_COUNT_SW_PAGE_FAULTS_MIN] = { 107 .symbol = "minor-faults", 108 .alias = "", 109 }, 110 [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = { 111 .symbol = "major-faults", 112 .alias = "", 113 }, 114 [PERF_COUNT_SW_ALIGNMENT_FAULTS] = { 115 .symbol = "alignment-faults", 116 .alias = "", 117 }, 118 [PERF_COUNT_SW_EMULATION_FAULTS] = { 119 .symbol = "emulation-faults", 120 .alias = "", 121 }, 122 [PERF_COUNT_SW_DUMMY] = { 123 .symbol = "dummy", 124 .alias = "", 125 }, 126 [PERF_COUNT_SW_BPF_OUTPUT] = { 127 .symbol = "bpf-output", 128 .alias = "", 129 }, 130 [PERF_COUNT_SW_CGROUP_SWITCHES] = { 131 .symbol = "cgroup-switches", 132 .alias = "", 133 }, 134 }; 135 136 const char *event_type(int type) 137 { 138 switch (type) { 139 case PERF_TYPE_HARDWARE: 140 return "hardware"; 141 142 case PERF_TYPE_SOFTWARE: 143 return "software"; 144 145 case PERF_TYPE_TRACEPOINT: 146 return "tracepoint"; 147 148 case PERF_TYPE_HW_CACHE: 149 return "hardware-cache"; 150 151 default: 152 break; 153 } 154 155 return "unknown"; 156 } 157 158 static char *get_config_str(const struct parse_events_terms *head_terms, 159 enum parse_events__term_type type_term) 160 { 161 struct parse_events_term *term; 162 163 if (!head_terms) 164 return NULL; 165 166 list_for_each_entry(term, &head_terms->terms, list) 167 if (term->type_term == type_term) 168 return term->val.str; 169 170 return NULL; 171 } 172 173 static char *get_config_metric_id(const struct parse_events_terms *head_terms) 174 { 175 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID); 176 } 177 178 static char *get_config_name(const struct parse_events_terms *head_terms) 179 { 180 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME); 181 } 182 183 /** 184 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that 185 * matches the raw's string value. If the string value matches an 186 * event then change the term to be an event, if not then change it to 187 * be a config term. For example, "read" may be an event of the PMU or 188 * a raw hex encoding of 0xead. The fix-up is done late so the PMU of 189 * the event can be determined and we don't need to scan all PMUs 190 * ahead-of-time. 191 * @config_terms: the list of terms that may contain a raw term. 192 * @pmu: the PMU to scan for events from. 193 */ 194 static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu) 195 { 196 struct parse_events_term *term; 197 198 list_for_each_entry(term, &config_terms->terms, list) { 199 u64 num; 200 201 if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW) 202 continue; 203 204 if (perf_pmu__have_event(pmu, term->val.str)) { 205 zfree(&term->config); 206 term->config = term->val.str; 207 term->type_val = PARSE_EVENTS__TERM_TYPE_NUM; 208 term->type_term = PARSE_EVENTS__TERM_TYPE_USER; 209 term->val.num = 1; 210 term->no_value = true; 211 continue; 212 } 213 214 zfree(&term->config); 215 term->config = strdup("config"); 216 errno = 0; 217 num = strtoull(term->val.str + 1, NULL, 16); 218 assert(errno == 0); 219 free(term->val.str); 220 term->type_val = PARSE_EVENTS__TERM_TYPE_NUM; 221 term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG; 222 term->val.num = num; 223 term->no_value = false; 224 } 225 } 226 227 static struct evsel * 228 __add_event(struct list_head *list, int *idx, 229 struct perf_event_attr *attr, 230 bool init_attr, 231 const char *name, const char *metric_id, struct perf_pmu *pmu, 232 struct list_head *config_terms, bool auto_merge_stats, 233 struct perf_cpu_map *cpu_list, u64 alternate_hw_config) 234 { 235 struct evsel *evsel; 236 struct perf_cpu_map *cpus = perf_cpu_map__is_empty(cpu_list) && pmu ? pmu->cpus : cpu_list; 237 238 cpus = perf_cpu_map__get(cpus); 239 if (pmu) 240 perf_pmu__warn_invalid_formats(pmu); 241 242 if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) { 243 perf_pmu__warn_invalid_config(pmu, attr->config, name, 244 PERF_PMU_FORMAT_VALUE_CONFIG, "config"); 245 perf_pmu__warn_invalid_config(pmu, attr->config1, name, 246 PERF_PMU_FORMAT_VALUE_CONFIG1, "config1"); 247 perf_pmu__warn_invalid_config(pmu, attr->config2, name, 248 PERF_PMU_FORMAT_VALUE_CONFIG2, "config2"); 249 perf_pmu__warn_invalid_config(pmu, attr->config3, name, 250 PERF_PMU_FORMAT_VALUE_CONFIG3, "config3"); 251 } 252 if (init_attr) 253 event_attr_init(attr); 254 255 evsel = evsel__new_idx(attr, *idx); 256 if (!evsel) { 257 perf_cpu_map__put(cpus); 258 return NULL; 259 } 260 261 (*idx)++; 262 evsel->core.cpus = cpus; 263 evsel->core.own_cpus = perf_cpu_map__get(cpus); 264 evsel->core.requires_cpu = pmu ? pmu->is_uncore : false; 265 evsel->core.is_pmu_core = pmu ? pmu->is_core : false; 266 evsel->auto_merge_stats = auto_merge_stats; 267 evsel->pmu = pmu; 268 evsel->alternate_hw_config = alternate_hw_config; 269 270 if (name) 271 evsel->name = strdup(name); 272 273 if (metric_id) 274 evsel->metric_id = strdup(metric_id); 275 276 if (config_terms) 277 list_splice_init(config_terms, &evsel->config_terms); 278 279 if (list) 280 list_add_tail(&evsel->core.node, list); 281 282 return evsel; 283 } 284 285 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr, 286 const char *name, const char *metric_id, 287 struct perf_pmu *pmu) 288 { 289 return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name, 290 metric_id, pmu, /*config_terms=*/NULL, 291 /*auto_merge_stats=*/false, /*cpu_list=*/NULL, 292 /*alternate_hw_config=*/PERF_COUNT_HW_MAX); 293 } 294 295 static int add_event(struct list_head *list, int *idx, 296 struct perf_event_attr *attr, const char *name, 297 const char *metric_id, struct list_head *config_terms, 298 u64 alternate_hw_config) 299 { 300 return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id, 301 /*pmu=*/NULL, config_terms, 302 /*auto_merge_stats=*/false, /*cpu_list=*/NULL, 303 alternate_hw_config) ? 0 : -ENOMEM; 304 } 305 306 /** 307 * parse_aliases - search names for entries beginning or equalling str ignoring 308 * case. If mutliple entries in names match str then the longest 309 * is chosen. 310 * @str: The needle to look for. 311 * @names: The haystack to search. 312 * @size: The size of the haystack. 313 * @longest: Out argument giving the length of the matching entry. 314 */ 315 static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size, 316 int *longest) 317 { 318 *longest = -1; 319 for (int i = 0; i < size; i++) { 320 for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) { 321 int n = strlen(names[i][j]); 322 323 if (n > *longest && !strncasecmp(str, names[i][j], n)) 324 *longest = n; 325 } 326 if (*longest > 0) 327 return i; 328 } 329 330 return -1; 331 } 332 333 typedef int config_term_func_t(struct perf_event_attr *attr, 334 struct parse_events_term *term, 335 struct parse_events_error *err); 336 static int config_term_common(struct perf_event_attr *attr, 337 struct parse_events_term *term, 338 struct parse_events_error *err); 339 static int config_attr(struct perf_event_attr *attr, 340 const struct parse_events_terms *head, 341 struct parse_events_error *err, 342 config_term_func_t config_term); 343 344 /** 345 * parse_events__decode_legacy_cache - Search name for the legacy cache event 346 * name composed of 1, 2 or 3 hyphen 347 * separated sections. The first section is 348 * the cache type while the others are the 349 * optional op and optional result. To make 350 * life hard the names in the table also 351 * contain hyphens and the longest name 352 * should always be selected. 353 */ 354 int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config) 355 { 356 int len, cache_type = -1, cache_op = -1, cache_result = -1; 357 const char *name_end = &name[strlen(name) + 1]; 358 const char *str = name; 359 360 cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len); 361 if (cache_type == -1) 362 return -EINVAL; 363 str += len + 1; 364 365 if (str < name_end) { 366 cache_op = parse_aliases(str, evsel__hw_cache_op, 367 PERF_COUNT_HW_CACHE_OP_MAX, &len); 368 if (cache_op >= 0) { 369 if (!evsel__is_cache_op_valid(cache_type, cache_op)) 370 return -EINVAL; 371 str += len + 1; 372 } else { 373 cache_result = parse_aliases(str, evsel__hw_cache_result, 374 PERF_COUNT_HW_CACHE_RESULT_MAX, &len); 375 if (cache_result >= 0) 376 str += len + 1; 377 } 378 } 379 if (str < name_end) { 380 if (cache_op < 0) { 381 cache_op = parse_aliases(str, evsel__hw_cache_op, 382 PERF_COUNT_HW_CACHE_OP_MAX, &len); 383 if (cache_op >= 0) { 384 if (!evsel__is_cache_op_valid(cache_type, cache_op)) 385 return -EINVAL; 386 } 387 } else if (cache_result < 0) { 388 cache_result = parse_aliases(str, evsel__hw_cache_result, 389 PERF_COUNT_HW_CACHE_RESULT_MAX, &len); 390 } 391 } 392 393 /* 394 * Fall back to reads: 395 */ 396 if (cache_op == -1) 397 cache_op = PERF_COUNT_HW_CACHE_OP_READ; 398 399 /* 400 * Fall back to accesses: 401 */ 402 if (cache_result == -1) 403 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; 404 405 *config = cache_type | (cache_op << 8) | (cache_result << 16); 406 if (perf_pmus__supports_extended_type()) 407 *config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT; 408 return 0; 409 } 410 411 /** 412 * parse_events__filter_pmu - returns false if a wildcard PMU should be 413 * considered, true if it should be filtered. 414 */ 415 bool parse_events__filter_pmu(const struct parse_events_state *parse_state, 416 const struct perf_pmu *pmu) 417 { 418 if (parse_state->pmu_filter == NULL) 419 return false; 420 421 return strcmp(parse_state->pmu_filter, pmu->name) != 0; 422 } 423 424 static int parse_events_add_pmu(struct parse_events_state *parse_state, 425 struct list_head *list, struct perf_pmu *pmu, 426 const struct parse_events_terms *const_parsed_terms, 427 bool auto_merge_stats, u64 alternate_hw_config); 428 429 int parse_events_add_cache(struct list_head *list, int *idx, const char *name, 430 struct parse_events_state *parse_state, 431 struct parse_events_terms *parsed_terms) 432 { 433 struct perf_pmu *pmu = NULL; 434 bool found_supported = false; 435 const char *config_name = get_config_name(parsed_terms); 436 const char *metric_id = get_config_metric_id(parsed_terms); 437 438 while ((pmu = perf_pmus__scan(pmu)) != NULL) { 439 LIST_HEAD(config_terms); 440 struct perf_event_attr attr; 441 int ret; 442 443 if (parse_events__filter_pmu(parse_state, pmu)) 444 continue; 445 446 if (perf_pmu__have_event(pmu, name)) { 447 /* 448 * The PMU has the event so add as not a legacy cache 449 * event. 450 */ 451 ret = parse_events_add_pmu(parse_state, list, pmu, 452 parsed_terms, 453 perf_pmu__auto_merge_stats(pmu), 454 /*alternate_hw_config=*/PERF_COUNT_HW_MAX); 455 if (ret) 456 return ret; 457 continue; 458 } 459 460 if (!pmu->is_core) { 461 /* Legacy cache events are only supported by core PMUs. */ 462 continue; 463 } 464 465 memset(&attr, 0, sizeof(attr)); 466 attr.type = PERF_TYPE_HW_CACHE; 467 468 ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config); 469 if (ret) 470 return ret; 471 472 found_supported = true; 473 474 if (parsed_terms) { 475 if (config_attr(&attr, parsed_terms, parse_state->error, 476 config_term_common)) 477 return -EINVAL; 478 479 if (get_config_terms(parsed_terms, &config_terms)) 480 return -ENOMEM; 481 } 482 483 if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name, 484 metric_id, pmu, &config_terms, /*auto_merge_stats=*/false, 485 /*cpu_list=*/NULL, 486 /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL) 487 return -ENOMEM; 488 489 free_config_terms(&config_terms); 490 } 491 return found_supported ? 0 : -EINVAL; 492 } 493 494 static void tracepoint_error(struct parse_events_error *e, int err, 495 const char *sys, const char *name, int column) 496 { 497 const char *str; 498 char help[BUFSIZ]; 499 500 if (!e) 501 return; 502 503 /* 504 * We get error directly from syscall errno ( > 0), 505 * or from encoded pointer's error ( < 0). 506 */ 507 err = abs(err); 508 509 switch (err) { 510 case EACCES: 511 str = "can't access trace events"; 512 break; 513 case ENOENT: 514 str = "unknown tracepoint"; 515 break; 516 default: 517 str = "failed to add tracepoint"; 518 break; 519 } 520 521 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name); 522 parse_events_error__handle(e, column, strdup(str), strdup(help)); 523 } 524 525 static int add_tracepoint(struct parse_events_state *parse_state, 526 struct list_head *list, 527 const char *sys_name, const char *evt_name, 528 struct parse_events_error *err, 529 struct parse_events_terms *head_config, void *loc_) 530 { 531 YYLTYPE *loc = loc_; 532 struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++, 533 !parse_state->fake_tp); 534 535 if (IS_ERR(evsel)) { 536 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column); 537 return PTR_ERR(evsel); 538 } 539 540 if (head_config) { 541 LIST_HEAD(config_terms); 542 543 if (get_config_terms(head_config, &config_terms)) 544 return -ENOMEM; 545 list_splice(&config_terms, &evsel->config_terms); 546 } 547 548 list_add_tail(&evsel->core.node, list); 549 return 0; 550 } 551 552 static int add_tracepoint_multi_event(struct parse_events_state *parse_state, 553 struct list_head *list, 554 const char *sys_name, const char *evt_name, 555 struct parse_events_error *err, 556 struct parse_events_terms *head_config, YYLTYPE *loc) 557 { 558 char *evt_path; 559 struct io_dirent64 *evt_ent; 560 struct io_dir evt_dir; 561 int ret = 0, found = 0; 562 563 evt_path = get_events_file(sys_name); 564 if (!evt_path) { 565 tracepoint_error(err, errno, sys_name, evt_name, loc->first_column); 566 return -1; 567 } 568 io_dir__init(&evt_dir, open(evt_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY)); 569 if (evt_dir.dirfd < 0) { 570 put_events_file(evt_path); 571 tracepoint_error(err, errno, sys_name, evt_name, loc->first_column); 572 return -1; 573 } 574 575 while (!ret && (evt_ent = io_dir__readdir(&evt_dir))) { 576 if (!strcmp(evt_ent->d_name, ".") 577 || !strcmp(evt_ent->d_name, "..") 578 || !strcmp(evt_ent->d_name, "enable") 579 || !strcmp(evt_ent->d_name, "filter")) 580 continue; 581 582 if (!strglobmatch(evt_ent->d_name, evt_name)) 583 continue; 584 585 found++; 586 587 ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name, 588 err, head_config, loc); 589 } 590 591 if (!found) { 592 tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column); 593 ret = -1; 594 } 595 596 put_events_file(evt_path); 597 close(evt_dir.dirfd); 598 return ret; 599 } 600 601 static int add_tracepoint_event(struct parse_events_state *parse_state, 602 struct list_head *list, 603 const char *sys_name, const char *evt_name, 604 struct parse_events_error *err, 605 struct parse_events_terms *head_config, YYLTYPE *loc) 606 { 607 return strpbrk(evt_name, "*?") ? 608 add_tracepoint_multi_event(parse_state, list, sys_name, evt_name, 609 err, head_config, loc) : 610 add_tracepoint(parse_state, list, sys_name, evt_name, 611 err, head_config, loc); 612 } 613 614 static int add_tracepoint_multi_sys(struct parse_events_state *parse_state, 615 struct list_head *list, 616 const char *sys_name, const char *evt_name, 617 struct parse_events_error *err, 618 struct parse_events_terms *head_config, YYLTYPE *loc) 619 { 620 struct io_dirent64 *events_ent; 621 struct io_dir events_dir; 622 int ret = 0; 623 char *events_dir_path = get_tracing_file("events"); 624 625 if (!events_dir_path) { 626 tracepoint_error(err, errno, sys_name, evt_name, loc->first_column); 627 return -1; 628 } 629 io_dir__init(&events_dir, open(events_dir_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY)); 630 put_events_file(events_dir_path); 631 if (events_dir.dirfd < 0) { 632 tracepoint_error(err, errno, sys_name, evt_name, loc->first_column); 633 return -1; 634 } 635 636 while (!ret && (events_ent = io_dir__readdir(&events_dir))) { 637 if (!strcmp(events_ent->d_name, ".") 638 || !strcmp(events_ent->d_name, "..") 639 || !strcmp(events_ent->d_name, "enable") 640 || !strcmp(events_ent->d_name, "header_event") 641 || !strcmp(events_ent->d_name, "header_page")) 642 continue; 643 644 if (!strglobmatch(events_ent->d_name, sys_name)) 645 continue; 646 647 ret = add_tracepoint_event(parse_state, list, events_ent->d_name, 648 evt_name, err, head_config, loc); 649 } 650 close(events_dir.dirfd); 651 return ret; 652 } 653 654 size_t default_breakpoint_len(void) 655 { 656 #if defined(__i386__) 657 static int len; 658 659 if (len == 0) { 660 struct perf_env env = {}; 661 662 perf_env__init(&env); 663 len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long); 664 perf_env__exit(&env); 665 } 666 return len; 667 #elif defined(__aarch64__) 668 return 4; 669 #else 670 return sizeof(long); 671 #endif 672 } 673 674 static int 675 parse_breakpoint_type(const char *type, struct perf_event_attr *attr) 676 { 677 int i; 678 679 for (i = 0; i < 3; i++) { 680 if (!type || !type[i]) 681 break; 682 683 #define CHECK_SET_TYPE(bit) \ 684 do { \ 685 if (attr->bp_type & bit) \ 686 return -EINVAL; \ 687 else \ 688 attr->bp_type |= bit; \ 689 } while (0) 690 691 switch (type[i]) { 692 case 'r': 693 CHECK_SET_TYPE(HW_BREAKPOINT_R); 694 break; 695 case 'w': 696 CHECK_SET_TYPE(HW_BREAKPOINT_W); 697 break; 698 case 'x': 699 CHECK_SET_TYPE(HW_BREAKPOINT_X); 700 break; 701 default: 702 return -EINVAL; 703 } 704 } 705 706 #undef CHECK_SET_TYPE 707 708 if (!attr->bp_type) /* Default */ 709 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; 710 711 return 0; 712 } 713 714 int parse_events_add_breakpoint(struct parse_events_state *parse_state, 715 struct list_head *list, 716 u64 addr, char *type, u64 len, 717 struct parse_events_terms *head_config) 718 { 719 struct perf_event_attr attr; 720 LIST_HEAD(config_terms); 721 const char *name; 722 723 memset(&attr, 0, sizeof(attr)); 724 attr.bp_addr = addr; 725 726 if (parse_breakpoint_type(type, &attr)) 727 return -EINVAL; 728 729 /* Provide some defaults if len is not specified */ 730 if (!len) { 731 if (attr.bp_type == HW_BREAKPOINT_X) 732 len = default_breakpoint_len(); 733 else 734 len = HW_BREAKPOINT_LEN_4; 735 } 736 737 attr.bp_len = len; 738 739 attr.type = PERF_TYPE_BREAKPOINT; 740 attr.sample_period = 1; 741 742 if (head_config) { 743 if (config_attr(&attr, head_config, parse_state->error, 744 config_term_common)) 745 return -EINVAL; 746 747 if (get_config_terms(head_config, &config_terms)) 748 return -ENOMEM; 749 } 750 751 name = get_config_name(head_config); 752 753 return add_event(list, &parse_state->idx, &attr, name, /*mertic_id=*/NULL, 754 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX); 755 } 756 757 static int check_type_val(struct parse_events_term *term, 758 struct parse_events_error *err, 759 enum parse_events__term_val_type type) 760 { 761 if (type == term->type_val) 762 return 0; 763 764 if (err) { 765 parse_events_error__handle(err, term->err_val, 766 type == PARSE_EVENTS__TERM_TYPE_NUM 767 ? strdup("expected numeric value") 768 : strdup("expected string value"), 769 NULL); 770 } 771 return -EINVAL; 772 } 773 774 static bool config_term_shrinked; 775 776 const char *parse_events__term_type_str(enum parse_events__term_type term_type) 777 { 778 /* 779 * Update according to parse-events.l 780 */ 781 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = { 782 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>", 783 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config", 784 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1", 785 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2", 786 [PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3", 787 [PARSE_EVENTS__TERM_TYPE_NAME] = "name", 788 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period", 789 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq", 790 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type", 791 [PARSE_EVENTS__TERM_TYPE_TIME] = "time", 792 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph", 793 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size", 794 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit", 795 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit", 796 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack", 797 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr", 798 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite", 799 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite", 800 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config", 801 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore", 802 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output", 803 [PARSE_EVENTS__TERM_TYPE_AUX_ACTION] = "aux-action", 804 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size", 805 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id", 806 [PARSE_EVENTS__TERM_TYPE_RAW] = "raw", 807 [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache", 808 [PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware", 809 }; 810 if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR) 811 return "unknown term"; 812 813 return config_term_names[term_type]; 814 } 815 816 static bool 817 config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err) 818 { 819 char *err_str; 820 821 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) { 822 parse_events_error__handle(err, -1, 823 strdup("Invalid term_type"), NULL); 824 return false; 825 } 826 if (!config_term_shrinked) 827 return true; 828 829 switch (term_type) { 830 case PARSE_EVENTS__TERM_TYPE_CONFIG: 831 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 832 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 833 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 834 case PARSE_EVENTS__TERM_TYPE_NAME: 835 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 836 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 837 case PARSE_EVENTS__TERM_TYPE_PERCORE: 838 return true; 839 case PARSE_EVENTS__TERM_TYPE_USER: 840 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 841 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 842 case PARSE_EVENTS__TERM_TYPE_TIME: 843 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 844 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 845 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 846 case PARSE_EVENTS__TERM_TYPE_INHERIT: 847 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 848 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 849 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 850 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 851 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 852 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 853 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 854 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 855 case PARSE_EVENTS__TERM_TYPE_RAW: 856 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE: 857 case PARSE_EVENTS__TERM_TYPE_HARDWARE: 858 default: 859 if (!err) 860 return false; 861 862 /* term_type is validated so indexing is safe */ 863 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'", 864 parse_events__term_type_str(term_type)) >= 0) 865 parse_events_error__handle(err, -1, err_str, NULL); 866 return false; 867 } 868 } 869 870 void parse_events__shrink_config_terms(void) 871 { 872 config_term_shrinked = true; 873 } 874 875 static int config_term_common(struct perf_event_attr *attr, 876 struct parse_events_term *term, 877 struct parse_events_error *err) 878 { 879 #define CHECK_TYPE_VAL(type) \ 880 do { \ 881 if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \ 882 return -EINVAL; \ 883 } while (0) 884 885 switch (term->type_term) { 886 case PARSE_EVENTS__TERM_TYPE_CONFIG: 887 CHECK_TYPE_VAL(NUM); 888 attr->config = term->val.num; 889 break; 890 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 891 CHECK_TYPE_VAL(NUM); 892 attr->config1 = term->val.num; 893 break; 894 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 895 CHECK_TYPE_VAL(NUM); 896 attr->config2 = term->val.num; 897 break; 898 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 899 CHECK_TYPE_VAL(NUM); 900 attr->config3 = term->val.num; 901 break; 902 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 903 CHECK_TYPE_VAL(NUM); 904 break; 905 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 906 CHECK_TYPE_VAL(NUM); 907 break; 908 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 909 CHECK_TYPE_VAL(STR); 910 if (strcmp(term->val.str, "no") && 911 parse_branch_str(term->val.str, 912 &attr->branch_sample_type)) { 913 parse_events_error__handle(err, term->err_val, 914 strdup("invalid branch sample type"), 915 NULL); 916 return -EINVAL; 917 } 918 break; 919 case PARSE_EVENTS__TERM_TYPE_TIME: 920 CHECK_TYPE_VAL(NUM); 921 if (term->val.num > 1) { 922 parse_events_error__handle(err, term->err_val, 923 strdup("expected 0 or 1"), 924 NULL); 925 return -EINVAL; 926 } 927 break; 928 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 929 CHECK_TYPE_VAL(STR); 930 break; 931 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 932 CHECK_TYPE_VAL(NUM); 933 break; 934 case PARSE_EVENTS__TERM_TYPE_INHERIT: 935 CHECK_TYPE_VAL(NUM); 936 break; 937 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 938 CHECK_TYPE_VAL(NUM); 939 break; 940 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 941 CHECK_TYPE_VAL(NUM); 942 break; 943 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 944 CHECK_TYPE_VAL(NUM); 945 break; 946 case PARSE_EVENTS__TERM_TYPE_NAME: 947 CHECK_TYPE_VAL(STR); 948 break; 949 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 950 CHECK_TYPE_VAL(STR); 951 break; 952 case PARSE_EVENTS__TERM_TYPE_RAW: 953 CHECK_TYPE_VAL(STR); 954 break; 955 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 956 CHECK_TYPE_VAL(NUM); 957 break; 958 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 959 CHECK_TYPE_VAL(NUM); 960 break; 961 case PARSE_EVENTS__TERM_TYPE_PERCORE: 962 CHECK_TYPE_VAL(NUM); 963 if ((unsigned int)term->val.num > 1) { 964 parse_events_error__handle(err, term->err_val, 965 strdup("expected 0 or 1"), 966 NULL); 967 return -EINVAL; 968 } 969 break; 970 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 971 CHECK_TYPE_VAL(NUM); 972 break; 973 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 974 CHECK_TYPE_VAL(STR); 975 break; 976 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 977 CHECK_TYPE_VAL(NUM); 978 if (term->val.num > UINT_MAX) { 979 parse_events_error__handle(err, term->err_val, 980 strdup("too big"), 981 NULL); 982 return -EINVAL; 983 } 984 break; 985 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 986 case PARSE_EVENTS__TERM_TYPE_USER: 987 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE: 988 case PARSE_EVENTS__TERM_TYPE_HARDWARE: 989 default: 990 parse_events_error__handle(err, term->err_term, 991 strdup(parse_events__term_type_str(term->type_term)), 992 parse_events_formats_error_string(NULL)); 993 return -EINVAL; 994 } 995 996 /* 997 * Check term availability after basic checking so 998 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered. 999 * 1000 * If check availability at the entry of this function, 1001 * user will see "'<sysfs term>' is not usable in 'perf stat'" 1002 * if an invalid config term is provided for legacy events 1003 * (for example, instructions/badterm/...), which is confusing. 1004 */ 1005 if (!config_term_avail(term->type_term, err)) 1006 return -EINVAL; 1007 return 0; 1008 #undef CHECK_TYPE_VAL 1009 } 1010 1011 static int config_term_pmu(struct perf_event_attr *attr, 1012 struct parse_events_term *term, 1013 struct parse_events_error *err) 1014 { 1015 if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) { 1016 struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type); 1017 1018 if (!pmu) { 1019 char *err_str; 1020 1021 if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0) 1022 parse_events_error__handle(err, term->err_term, 1023 err_str, /*help=*/NULL); 1024 return -EINVAL; 1025 } 1026 /* 1027 * Rewrite the PMU event to a legacy cache one unless the PMU 1028 * doesn't support legacy cache events or the event is present 1029 * within the PMU. 1030 */ 1031 if (perf_pmu__supports_legacy_cache(pmu) && 1032 !perf_pmu__have_event(pmu, term->config)) { 1033 attr->type = PERF_TYPE_HW_CACHE; 1034 return parse_events__decode_legacy_cache(term->config, pmu->type, 1035 &attr->config); 1036 } else { 1037 term->type_term = PARSE_EVENTS__TERM_TYPE_USER; 1038 term->no_value = true; 1039 } 1040 } 1041 if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) { 1042 struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type); 1043 1044 if (!pmu) { 1045 char *err_str; 1046 1047 if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0) 1048 parse_events_error__handle(err, term->err_term, 1049 err_str, /*help=*/NULL); 1050 return -EINVAL; 1051 } 1052 /* 1053 * If the PMU has a sysfs or json event prefer it over 1054 * legacy. ARM requires this. 1055 */ 1056 if (perf_pmu__have_event(pmu, term->config)) { 1057 term->type_term = PARSE_EVENTS__TERM_TYPE_USER; 1058 term->no_value = true; 1059 term->alternate_hw_config = true; 1060 } else { 1061 attr->type = PERF_TYPE_HARDWARE; 1062 attr->config = term->val.num; 1063 if (perf_pmus__supports_extended_type()) 1064 attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT; 1065 } 1066 return 0; 1067 } 1068 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER || 1069 term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) { 1070 /* 1071 * Always succeed for sysfs terms, as we dont know 1072 * at this point what type they need to have. 1073 */ 1074 return 0; 1075 } 1076 return config_term_common(attr, term, err); 1077 } 1078 1079 static int config_term_tracepoint(struct perf_event_attr *attr, 1080 struct parse_events_term *term, 1081 struct parse_events_error *err) 1082 { 1083 switch (term->type_term) { 1084 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1085 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1086 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1087 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1088 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1089 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1090 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1091 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1092 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1093 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 1094 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1095 return config_term_common(attr, term, err); 1096 case PARSE_EVENTS__TERM_TYPE_USER: 1097 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1098 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 1099 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 1100 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 1101 case PARSE_EVENTS__TERM_TYPE_NAME: 1102 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1103 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1104 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1105 case PARSE_EVENTS__TERM_TYPE_TIME: 1106 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1107 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1108 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1109 case PARSE_EVENTS__TERM_TYPE_RAW: 1110 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE: 1111 case PARSE_EVENTS__TERM_TYPE_HARDWARE: 1112 default: 1113 if (err) { 1114 parse_events_error__handle(err, term->err_term, 1115 strdup(parse_events__term_type_str(term->type_term)), 1116 strdup("valid terms: call-graph,stack-size\n") 1117 ); 1118 } 1119 return -EINVAL; 1120 } 1121 1122 return 0; 1123 } 1124 1125 static int config_attr(struct perf_event_attr *attr, 1126 const struct parse_events_terms *head, 1127 struct parse_events_error *err, 1128 config_term_func_t config_term) 1129 { 1130 struct parse_events_term *term; 1131 1132 list_for_each_entry(term, &head->terms, list) 1133 if (config_term(attr, term, err)) 1134 return -EINVAL; 1135 1136 return 0; 1137 } 1138 1139 static int get_config_terms(const struct parse_events_terms *head_config, 1140 struct list_head *head_terms) 1141 { 1142 #define ADD_CONFIG_TERM(__type, __weak) \ 1143 struct evsel_config_term *__t; \ 1144 \ 1145 __t = zalloc(sizeof(*__t)); \ 1146 if (!__t) \ 1147 return -ENOMEM; \ 1148 \ 1149 INIT_LIST_HEAD(&__t->list); \ 1150 __t->type = EVSEL__CONFIG_TERM_ ## __type; \ 1151 __t->weak = __weak; \ 1152 list_add_tail(&__t->list, head_terms) 1153 1154 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \ 1155 do { \ 1156 ADD_CONFIG_TERM(__type, __weak); \ 1157 __t->val.__name = __val; \ 1158 } while (0) 1159 1160 #define ADD_CONFIG_TERM_STR(__type, __val, __weak) \ 1161 do { \ 1162 ADD_CONFIG_TERM(__type, __weak); \ 1163 __t->val.str = strdup(__val); \ 1164 if (!__t->val.str) { \ 1165 zfree(&__t); \ 1166 return -ENOMEM; \ 1167 } \ 1168 __t->free_str = true; \ 1169 } while (0) 1170 1171 struct parse_events_term *term; 1172 1173 list_for_each_entry(term, &head_config->terms, list) { 1174 switch (term->type_term) { 1175 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1176 ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak); 1177 break; 1178 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1179 ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak); 1180 break; 1181 case PARSE_EVENTS__TERM_TYPE_TIME: 1182 ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak); 1183 break; 1184 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1185 ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak); 1186 break; 1187 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1188 ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak); 1189 break; 1190 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1191 ADD_CONFIG_TERM_VAL(STACK_USER, stack_user, 1192 term->val.num, term->weak); 1193 break; 1194 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1195 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1196 term->val.num ? 1 : 0, term->weak); 1197 break; 1198 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1199 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1200 term->val.num ? 0 : 1, term->weak); 1201 break; 1202 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1203 ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack, 1204 term->val.num, term->weak); 1205 break; 1206 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1207 ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events, 1208 term->val.num, term->weak); 1209 break; 1210 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1211 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1212 term->val.num ? 1 : 0, term->weak); 1213 break; 1214 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1215 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1216 term->val.num ? 0 : 1, term->weak); 1217 break; 1218 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1219 ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak); 1220 break; 1221 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1222 ADD_CONFIG_TERM_VAL(PERCORE, percore, 1223 term->val.num ? true : false, term->weak); 1224 break; 1225 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1226 ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output, 1227 term->val.num ? 1 : 0, term->weak); 1228 break; 1229 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 1230 ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak); 1231 break; 1232 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1233 ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size, 1234 term->val.num, term->weak); 1235 break; 1236 case PARSE_EVENTS__TERM_TYPE_USER: 1237 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1238 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 1239 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 1240 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 1241 case PARSE_EVENTS__TERM_TYPE_NAME: 1242 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1243 case PARSE_EVENTS__TERM_TYPE_RAW: 1244 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE: 1245 case PARSE_EVENTS__TERM_TYPE_HARDWARE: 1246 default: 1247 break; 1248 } 1249 } 1250 return 0; 1251 } 1252 1253 /* 1254 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for 1255 * each bit of attr->config that the user has changed. 1256 */ 1257 static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config, 1258 struct list_head *head_terms) 1259 { 1260 struct parse_events_term *term; 1261 u64 bits = 0; 1262 int type; 1263 1264 list_for_each_entry(term, &head_config->terms, list) { 1265 switch (term->type_term) { 1266 case PARSE_EVENTS__TERM_TYPE_USER: 1267 type = perf_pmu__format_type(pmu, term->config); 1268 if (type != PERF_PMU_FORMAT_VALUE_CONFIG) 1269 continue; 1270 bits |= perf_pmu__format_bits(pmu, term->config); 1271 break; 1272 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1273 bits = ~(u64)0; 1274 break; 1275 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 1276 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 1277 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 1278 case PARSE_EVENTS__TERM_TYPE_NAME: 1279 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1280 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1281 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1282 case PARSE_EVENTS__TERM_TYPE_TIME: 1283 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1284 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1285 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1286 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1287 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1288 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1289 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1290 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1291 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1292 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1293 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1294 case PARSE_EVENTS__TERM_TYPE_AUX_ACTION: 1295 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1296 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1297 case PARSE_EVENTS__TERM_TYPE_RAW: 1298 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE: 1299 case PARSE_EVENTS__TERM_TYPE_HARDWARE: 1300 default: 1301 break; 1302 } 1303 } 1304 1305 if (bits) 1306 ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false); 1307 1308 #undef ADD_CONFIG_TERM 1309 return 0; 1310 } 1311 1312 int parse_events_add_tracepoint(struct parse_events_state *parse_state, 1313 struct list_head *list, 1314 const char *sys, const char *event, 1315 struct parse_events_error *err, 1316 struct parse_events_terms *head_config, void *loc_) 1317 { 1318 YYLTYPE *loc = loc_; 1319 1320 if (head_config) { 1321 struct perf_event_attr attr; 1322 1323 if (config_attr(&attr, head_config, err, 1324 config_term_tracepoint)) 1325 return -EINVAL; 1326 } 1327 1328 if (strpbrk(sys, "*?")) 1329 return add_tracepoint_multi_sys(parse_state, list, sys, event, 1330 err, head_config, loc); 1331 else 1332 return add_tracepoint_event(parse_state, list, sys, event, 1333 err, head_config, loc); 1334 } 1335 1336 static int __parse_events_add_numeric(struct parse_events_state *parse_state, 1337 struct list_head *list, 1338 struct perf_pmu *pmu, u32 type, u32 extended_type, 1339 u64 config, const struct parse_events_terms *head_config) 1340 { 1341 struct perf_event_attr attr; 1342 LIST_HEAD(config_terms); 1343 const char *name, *metric_id; 1344 int ret; 1345 1346 memset(&attr, 0, sizeof(attr)); 1347 attr.type = type; 1348 attr.config = config; 1349 if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) { 1350 assert(perf_pmus__supports_extended_type()); 1351 attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT; 1352 } 1353 1354 if (head_config) { 1355 if (config_attr(&attr, head_config, parse_state->error, 1356 config_term_common)) 1357 return -EINVAL; 1358 1359 if (get_config_terms(head_config, &config_terms)) 1360 return -ENOMEM; 1361 } 1362 1363 name = get_config_name(head_config); 1364 metric_id = get_config_metric_id(head_config); 1365 ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name, 1366 metric_id, pmu, &config_terms, /*auto_merge_stats=*/false, 1367 /*cpu_list=*/NULL, /*alternate_hw_config=*/PERF_COUNT_HW_MAX 1368 ) == NULL ? -ENOMEM : 0; 1369 free_config_terms(&config_terms); 1370 return ret; 1371 } 1372 1373 int parse_events_add_numeric(struct parse_events_state *parse_state, 1374 struct list_head *list, 1375 u32 type, u64 config, 1376 const struct parse_events_terms *head_config, 1377 bool wildcard) 1378 { 1379 struct perf_pmu *pmu = NULL; 1380 bool found_supported = false; 1381 1382 /* Wildcards on numeric values are only supported by core PMUs. */ 1383 if (wildcard && perf_pmus__supports_extended_type()) { 1384 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 1385 int ret; 1386 1387 found_supported = true; 1388 if (parse_events__filter_pmu(parse_state, pmu)) 1389 continue; 1390 1391 ret = __parse_events_add_numeric(parse_state, list, pmu, 1392 type, pmu->type, 1393 config, head_config); 1394 if (ret) 1395 return ret; 1396 } 1397 if (found_supported) 1398 return 0; 1399 } 1400 return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type), 1401 type, /*extended_type=*/0, config, head_config); 1402 } 1403 1404 static bool config_term_percore(struct list_head *config_terms) 1405 { 1406 struct evsel_config_term *term; 1407 1408 list_for_each_entry(term, config_terms, list) { 1409 if (term->type == EVSEL__CONFIG_TERM_PERCORE) 1410 return term->val.percore; 1411 } 1412 1413 return false; 1414 } 1415 1416 static int parse_events_add_pmu(struct parse_events_state *parse_state, 1417 struct list_head *list, struct perf_pmu *pmu, 1418 const struct parse_events_terms *const_parsed_terms, 1419 bool auto_merge_stats, u64 alternate_hw_config) 1420 { 1421 struct perf_event_attr attr; 1422 struct perf_pmu_info info; 1423 struct evsel *evsel; 1424 struct parse_events_error *err = parse_state->error; 1425 LIST_HEAD(config_terms); 1426 struct parse_events_terms parsed_terms; 1427 bool alias_rewrote_terms = false; 1428 1429 if (verbose > 1) { 1430 struct strbuf sb; 1431 1432 strbuf_init(&sb, /*hint=*/ 0); 1433 if (pmu->selectable && const_parsed_terms && 1434 list_empty(&const_parsed_terms->terms)) { 1435 strbuf_addf(&sb, "%s//", pmu->name); 1436 } else { 1437 strbuf_addf(&sb, "%s/", pmu->name); 1438 parse_events_terms__to_strbuf(const_parsed_terms, &sb); 1439 strbuf_addch(&sb, '/'); 1440 } 1441 fprintf(stderr, "Attempt to add: %s\n", sb.buf); 1442 strbuf_release(&sb); 1443 } 1444 1445 memset(&attr, 0, sizeof(attr)); 1446 if (pmu->perf_event_attr_init_default) 1447 pmu->perf_event_attr_init_default(pmu, &attr); 1448 1449 attr.type = pmu->type; 1450 1451 if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) { 1452 evsel = __add_event(list, &parse_state->idx, &attr, 1453 /*init_attr=*/true, /*name=*/NULL, 1454 /*metric_id=*/NULL, pmu, 1455 /*config_terms=*/NULL, auto_merge_stats, 1456 /*cpu_list=*/NULL, alternate_hw_config); 1457 return evsel ? 0 : -ENOMEM; 1458 } 1459 1460 parse_events_terms__init(&parsed_terms); 1461 if (const_parsed_terms) { 1462 int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms); 1463 1464 if (ret) 1465 return ret; 1466 } 1467 fix_raw(&parsed_terms, pmu); 1468 1469 /* Configure attr/terms with a known PMU, this will set hardcoded terms. */ 1470 if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) { 1471 parse_events_terms__exit(&parsed_terms); 1472 return -EINVAL; 1473 } 1474 1475 /* Look for event names in the terms and rewrite into format based terms. */ 1476 if (perf_pmu__check_alias(pmu, &parsed_terms, 1477 &info, &alias_rewrote_terms, 1478 &alternate_hw_config, err)) { 1479 parse_events_terms__exit(&parsed_terms); 1480 return -EINVAL; 1481 } 1482 1483 if (verbose > 1) { 1484 struct strbuf sb; 1485 1486 strbuf_init(&sb, /*hint=*/ 0); 1487 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1488 fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf); 1489 strbuf_release(&sb); 1490 } 1491 1492 /* Configure attr/terms again if an alias was expanded. */ 1493 if (alias_rewrote_terms && 1494 config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) { 1495 parse_events_terms__exit(&parsed_terms); 1496 return -EINVAL; 1497 } 1498 1499 if (get_config_terms(&parsed_terms, &config_terms)) { 1500 parse_events_terms__exit(&parsed_terms); 1501 return -ENOMEM; 1502 } 1503 1504 /* 1505 * When using default config, record which bits of attr->config were 1506 * changed by the user. 1507 */ 1508 if (pmu->perf_event_attr_init_default && 1509 get_config_chgs(pmu, &parsed_terms, &config_terms)) { 1510 parse_events_terms__exit(&parsed_terms); 1511 return -ENOMEM; 1512 } 1513 1514 /* Skip configuring hard coded terms that were applied by config_attr. */ 1515 if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false, 1516 parse_state->error)) { 1517 free_config_terms(&config_terms); 1518 parse_events_terms__exit(&parsed_terms); 1519 return -EINVAL; 1520 } 1521 1522 evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true, 1523 get_config_name(&parsed_terms), 1524 get_config_metric_id(&parsed_terms), pmu, 1525 &config_terms, auto_merge_stats, /*cpu_list=*/NULL, 1526 alternate_hw_config); 1527 if (!evsel) { 1528 parse_events_terms__exit(&parsed_terms); 1529 return -ENOMEM; 1530 } 1531 1532 if (evsel->name) 1533 evsel->use_config_name = true; 1534 1535 evsel->percore = config_term_percore(&evsel->config_terms); 1536 1537 parse_events_terms__exit(&parsed_terms); 1538 free((char *)evsel->unit); 1539 evsel->unit = strdup(info.unit); 1540 evsel->scale = info.scale; 1541 evsel->per_pkg = info.per_pkg; 1542 evsel->snapshot = info.snapshot; 1543 evsel->retirement_latency.mean = info.retirement_latency_mean; 1544 evsel->retirement_latency.min = info.retirement_latency_min; 1545 evsel->retirement_latency.max = info.retirement_latency_max; 1546 1547 return 0; 1548 } 1549 1550 int parse_events_multi_pmu_add(struct parse_events_state *parse_state, 1551 const char *event_name, u64 hw_config, 1552 const struct parse_events_terms *const_parsed_terms, 1553 struct list_head **listp, void *loc_) 1554 { 1555 struct parse_events_term *term; 1556 struct list_head *list = NULL; 1557 struct perf_pmu *pmu = NULL; 1558 YYLTYPE *loc = loc_; 1559 int ok = 0; 1560 const char *config; 1561 struct parse_events_terms parsed_terms; 1562 1563 *listp = NULL; 1564 1565 parse_events_terms__init(&parsed_terms); 1566 if (const_parsed_terms) { 1567 int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms); 1568 1569 if (ret) 1570 return ret; 1571 } 1572 1573 config = strdup(event_name); 1574 if (!config) 1575 goto out_err; 1576 1577 if (parse_events_term__num(&term, 1578 PARSE_EVENTS__TERM_TYPE_USER, 1579 config, /*num=*/1, /*novalue=*/true, 1580 loc, /*loc_val=*/NULL) < 0) { 1581 zfree(&config); 1582 goto out_err; 1583 } 1584 list_add_tail(&term->list, &parsed_terms.terms); 1585 1586 /* Add it for all PMUs that support the alias */ 1587 list = malloc(sizeof(struct list_head)); 1588 if (!list) 1589 goto out_err; 1590 1591 INIT_LIST_HEAD(list); 1592 1593 while ((pmu = perf_pmus__scan(pmu)) != NULL) { 1594 bool auto_merge_stats; 1595 1596 if (parse_events__filter_pmu(parse_state, pmu)) 1597 continue; 1598 1599 if (!perf_pmu__have_event(pmu, event_name)) 1600 continue; 1601 1602 auto_merge_stats = perf_pmu__auto_merge_stats(pmu); 1603 if (!parse_events_add_pmu(parse_state, list, pmu, 1604 &parsed_terms, auto_merge_stats, hw_config)) { 1605 struct strbuf sb; 1606 1607 strbuf_init(&sb, /*hint=*/ 0); 1608 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1609 pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf); 1610 strbuf_release(&sb); 1611 ok++; 1612 } 1613 } 1614 1615 if (parse_state->fake_pmu) { 1616 if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms, 1617 /*auto_merge_stats=*/true, hw_config)) { 1618 struct strbuf sb; 1619 1620 strbuf_init(&sb, /*hint=*/ 0); 1621 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1622 pr_debug("%s -> fake/%s/\n", event_name, sb.buf); 1623 strbuf_release(&sb); 1624 ok++; 1625 } 1626 } 1627 1628 out_err: 1629 parse_events_terms__exit(&parsed_terms); 1630 if (ok) 1631 *listp = list; 1632 else 1633 free(list); 1634 1635 return ok ? 0 : -1; 1636 } 1637 1638 int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state, 1639 const char *event_or_pmu, 1640 const struct parse_events_terms *const_parsed_terms, 1641 struct list_head **listp, 1642 void *loc_) 1643 { 1644 YYLTYPE *loc = loc_; 1645 struct perf_pmu *pmu; 1646 int ok = 0; 1647 char *help; 1648 1649 *listp = malloc(sizeof(**listp)); 1650 if (!*listp) 1651 return -ENOMEM; 1652 1653 INIT_LIST_HEAD(*listp); 1654 1655 /* Attempt to add to list assuming event_or_pmu is a PMU name. */ 1656 pmu = perf_pmus__find(event_or_pmu); 1657 if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms, 1658 /*auto_merge_stats=*/false, 1659 /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) 1660 return 0; 1661 1662 if (parse_state->fake_pmu) { 1663 if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(), 1664 const_parsed_terms, 1665 /*auto_merge_stats=*/false, 1666 /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) 1667 return 0; 1668 } 1669 1670 pmu = NULL; 1671 /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */ 1672 while ((pmu = perf_pmus__scan(pmu)) != NULL) { 1673 if (!parse_events__filter_pmu(parse_state, pmu) && 1674 perf_pmu__wildcard_match(pmu, event_or_pmu)) { 1675 bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu); 1676 1677 if (!parse_events_add_pmu(parse_state, *listp, pmu, 1678 const_parsed_terms, 1679 auto_merge_stats, 1680 /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) { 1681 ok++; 1682 parse_state->wild_card_pmus = true; 1683 } 1684 } 1685 } 1686 if (ok) 1687 return 0; 1688 1689 /* Failure to add, assume event_or_pmu is an event name. */ 1690 zfree(listp); 1691 if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX, 1692 const_parsed_terms, listp, loc)) 1693 return 0; 1694 1695 if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0) 1696 help = NULL; 1697 parse_events_error__handle(parse_state->error, loc->first_column, 1698 strdup("Bad event or PMU"), 1699 help); 1700 zfree(listp); 1701 return -EINVAL; 1702 } 1703 1704 void parse_events__set_leader(char *name, struct list_head *list) 1705 { 1706 struct evsel *leader; 1707 1708 if (list_empty(list)) { 1709 WARN_ONCE(true, "WARNING: failed to set leader: empty list"); 1710 return; 1711 } 1712 1713 leader = list_first_entry(list, struct evsel, core.node); 1714 __perf_evlist__set_leader(list, &leader->core); 1715 zfree(&leader->group_name); 1716 leader->group_name = name; 1717 } 1718 1719 static int parse_events__modifier_list(struct parse_events_state *parse_state, 1720 YYLTYPE *loc, 1721 struct list_head *list, 1722 struct parse_events_modifier mod, 1723 bool group) 1724 { 1725 struct evsel *evsel; 1726 1727 if (!group && mod.weak) { 1728 parse_events_error__handle(parse_state->error, loc->first_column, 1729 strdup("Weak modifier is for use with groups"), NULL); 1730 return -EINVAL; 1731 } 1732 1733 __evlist__for_each_entry(list, evsel) { 1734 /* Translate modifiers into the equivalent evsel excludes. */ 1735 int eu = group ? evsel->core.attr.exclude_user : 0; 1736 int ek = group ? evsel->core.attr.exclude_kernel : 0; 1737 int eh = group ? evsel->core.attr.exclude_hv : 0; 1738 int eH = group ? evsel->core.attr.exclude_host : 0; 1739 int eG = group ? evsel->core.attr.exclude_guest : 0; 1740 int exclude = eu | ek | eh; 1741 int exclude_GH = group ? evsel->exclude_GH : 0; 1742 1743 if (mod.user) { 1744 if (!exclude) 1745 exclude = eu = ek = eh = 1; 1746 if (!exclude_GH && !perf_guest && exclude_GH_default) 1747 eG = 1; 1748 eu = 0; 1749 } 1750 if (mod.kernel) { 1751 if (!exclude) 1752 exclude = eu = ek = eh = 1; 1753 ek = 0; 1754 } 1755 if (mod.hypervisor) { 1756 if (!exclude) 1757 exclude = eu = ek = eh = 1; 1758 eh = 0; 1759 } 1760 if (mod.guest) { 1761 if (!exclude_GH) 1762 exclude_GH = eG = eH = 1; 1763 eG = 0; 1764 } 1765 if (mod.host) { 1766 if (!exclude_GH) 1767 exclude_GH = eG = eH = 1; 1768 eH = 0; 1769 } 1770 evsel->core.attr.exclude_user = eu; 1771 evsel->core.attr.exclude_kernel = ek; 1772 evsel->core.attr.exclude_hv = eh; 1773 evsel->core.attr.exclude_host = eH; 1774 evsel->core.attr.exclude_guest = eG; 1775 evsel->exclude_GH = exclude_GH; 1776 1777 /* Simple modifiers copied to the evsel. */ 1778 if (mod.precise) { 1779 u8 precise = evsel->core.attr.precise_ip + mod.precise; 1780 /* 1781 * precise ip: 1782 * 1783 * 0 - SAMPLE_IP can have arbitrary skid 1784 * 1 - SAMPLE_IP must have constant skid 1785 * 2 - SAMPLE_IP requested to have 0 skid 1786 * 3 - SAMPLE_IP must have 0 skid 1787 * 1788 * See also PERF_RECORD_MISC_EXACT_IP 1789 */ 1790 if (precise > 3) { 1791 char *help; 1792 1793 if (asprintf(&help, 1794 "Maximum combined precise value is 3, adding precision to \"%s\"", 1795 evsel__name(evsel)) > 0) { 1796 parse_events_error__handle(parse_state->error, 1797 loc->first_column, 1798 help, NULL); 1799 } 1800 return -EINVAL; 1801 } 1802 evsel->core.attr.precise_ip = precise; 1803 } 1804 if (mod.precise_max) 1805 evsel->precise_max = 1; 1806 if (mod.non_idle) 1807 evsel->core.attr.exclude_idle = 1; 1808 if (mod.sample_read) 1809 evsel->sample_read = 1; 1810 if (mod.pinned && evsel__is_group_leader(evsel)) 1811 evsel->core.attr.pinned = 1; 1812 if (mod.exclusive && evsel__is_group_leader(evsel)) 1813 evsel->core.attr.exclusive = 1; 1814 if (mod.weak) 1815 evsel->weak_group = true; 1816 if (mod.bpf) 1817 evsel->bpf_counter = true; 1818 if (mod.retire_lat) 1819 evsel->retire_lat = true; 1820 } 1821 return 0; 1822 } 1823 1824 int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc, 1825 struct list_head *list, 1826 struct parse_events_modifier mod) 1827 { 1828 return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true); 1829 } 1830 1831 int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc, 1832 struct list_head *list, 1833 struct parse_events_modifier mod) 1834 { 1835 return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false); 1836 } 1837 1838 int parse_events__set_default_name(struct list_head *list, char *name) 1839 { 1840 struct evsel *evsel; 1841 bool used_name = false; 1842 1843 __evlist__for_each_entry(list, evsel) { 1844 if (!evsel->name) { 1845 evsel->name = used_name ? strdup(name) : name; 1846 used_name = true; 1847 if (!evsel->name) 1848 return -ENOMEM; 1849 } 1850 } 1851 if (!used_name) 1852 free(name); 1853 return 0; 1854 } 1855 1856 static int parse_events__scanner(const char *str, 1857 FILE *input, 1858 struct parse_events_state *parse_state) 1859 { 1860 YY_BUFFER_STATE buffer; 1861 void *scanner; 1862 int ret; 1863 1864 ret = parse_events_lex_init_extra(parse_state, &scanner); 1865 if (ret) 1866 return ret; 1867 1868 if (str) 1869 buffer = parse_events__scan_string(str, scanner); 1870 else 1871 parse_events_set_in(input, scanner); 1872 1873 #ifdef PARSER_DEBUG 1874 parse_events_debug = 1; 1875 parse_events_set_debug(1, scanner); 1876 #endif 1877 ret = parse_events_parse(parse_state, scanner); 1878 1879 if (str) { 1880 parse_events__flush_buffer(buffer, scanner); 1881 parse_events__delete_buffer(buffer, scanner); 1882 } 1883 parse_events_lex_destroy(scanner); 1884 return ret; 1885 } 1886 1887 /* 1888 * parse event config string, return a list of event terms. 1889 */ 1890 int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input) 1891 { 1892 struct parse_events_state parse_state = { 1893 .terms = NULL, 1894 .stoken = PE_START_TERMS, 1895 }; 1896 int ret; 1897 1898 ret = parse_events__scanner(str, input, &parse_state); 1899 if (!ret) 1900 list_splice(&parse_state.terms->terms, &terms->terms); 1901 1902 zfree(&parse_state.terms); 1903 return ret; 1904 } 1905 1906 static int evsel__compute_group_pmu_name(struct evsel *evsel, 1907 const struct list_head *head) 1908 { 1909 struct evsel *leader = evsel__leader(evsel); 1910 struct evsel *pos; 1911 const char *group_pmu_name; 1912 struct perf_pmu *pmu = evsel__find_pmu(evsel); 1913 1914 if (!pmu) { 1915 /* 1916 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU 1917 * is a core PMU, but in heterogeneous systems this is 1918 * unknown. For now pick the first core PMU. 1919 */ 1920 pmu = perf_pmus__scan_core(NULL); 1921 } 1922 if (!pmu) { 1923 pr_debug("No PMU found for '%s'\n", evsel__name(evsel)); 1924 return -EINVAL; 1925 } 1926 group_pmu_name = pmu->name; 1927 /* 1928 * Software events may be in a group with other uncore PMU events. Use 1929 * the pmu_name of the first non-software event to avoid breaking the 1930 * software event out of the group. 1931 * 1932 * Aux event leaders, like intel_pt, expect a group with events from 1933 * other PMUs, so substitute the AUX event's PMU in this case. 1934 */ 1935 if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) { 1936 struct perf_pmu *leader_pmu = evsel__find_pmu(leader); 1937 1938 if (!leader_pmu) { 1939 /* As with determining pmu above. */ 1940 leader_pmu = perf_pmus__scan_core(NULL); 1941 } 1942 /* 1943 * Starting with the leader, find the first event with a named 1944 * non-software PMU. for_each_group_(member|evsel) isn't used as 1945 * the list isn't yet sorted putting evsel's in the same group 1946 * together. 1947 */ 1948 if (leader_pmu && !perf_pmu__is_software(leader_pmu)) { 1949 group_pmu_name = leader_pmu->name; 1950 } else if (leader->core.nr_members > 1) { 1951 list_for_each_entry(pos, head, core.node) { 1952 struct perf_pmu *pos_pmu; 1953 1954 if (pos == leader || evsel__leader(pos) != leader) 1955 continue; 1956 pos_pmu = evsel__find_pmu(pos); 1957 if (!pos_pmu) { 1958 /* As with determining pmu above. */ 1959 pos_pmu = perf_pmus__scan_core(NULL); 1960 } 1961 if (pos_pmu && !perf_pmu__is_software(pos_pmu)) { 1962 group_pmu_name = pos_pmu->name; 1963 break; 1964 } 1965 } 1966 } 1967 } 1968 /* Record computed name. */ 1969 evsel->group_pmu_name = strdup(group_pmu_name); 1970 return evsel->group_pmu_name ? 0 : -ENOMEM; 1971 } 1972 1973 __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs) 1974 { 1975 /* Order by insertion index. */ 1976 return lhs->core.idx - rhs->core.idx; 1977 } 1978 1979 static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r) 1980 { 1981 const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node); 1982 const struct evsel *lhs = container_of(lhs_core, struct evsel, core); 1983 const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node); 1984 const struct evsel *rhs = container_of(rhs_core, struct evsel, core); 1985 int *force_grouped_idx = _fg_idx; 1986 int lhs_sort_idx, rhs_sort_idx, ret; 1987 const char *lhs_pmu_name, *rhs_pmu_name; 1988 1989 /* 1990 * Get the indexes of the 2 events to sort. If the events are 1991 * in groups then the leader's index is used otherwise the 1992 * event's index is used. An index may be forced for events that 1993 * must be in the same group, namely Intel topdown events. 1994 */ 1995 if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) { 1996 lhs_sort_idx = *force_grouped_idx; 1997 } else { 1998 bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1; 1999 2000 lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx; 2001 } 2002 if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) { 2003 rhs_sort_idx = *force_grouped_idx; 2004 } else { 2005 bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1; 2006 2007 rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx; 2008 } 2009 2010 /* If the indices differ then respect the insertion order. */ 2011 if (lhs_sort_idx != rhs_sort_idx) 2012 return lhs_sort_idx - rhs_sort_idx; 2013 2014 /* 2015 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should 2016 * be in the same group. Events in the same group need to be ordered by 2017 * their grouping PMU name as the group will be broken to ensure only 2018 * events on the same PMU are programmed together. 2019 * 2020 * With forcing the lhs_sort_idx == rhs_sort_idx shows that one or both 2021 * events are being forced to be at force_group_index. If only one event 2022 * is being forced then the other event is the group leader of the group 2023 * we're trying to force the event into. Ensure for the force grouped 2024 * case that the PMU name ordering is also respected. 2025 */ 2026 lhs_pmu_name = lhs->group_pmu_name; 2027 rhs_pmu_name = rhs->group_pmu_name; 2028 ret = strcmp(lhs_pmu_name, rhs_pmu_name); 2029 if (ret) 2030 return ret; 2031 2032 /* 2033 * Architecture specific sorting, by default sort events in the same 2034 * group with the same PMU by their insertion index. On Intel topdown 2035 * constraints must be adhered to - slots first, etc. 2036 */ 2037 return arch_evlist__cmp(lhs, rhs); 2038 } 2039 2040 static int parse_events__sort_events_and_fix_groups(struct list_head *list) 2041 { 2042 int idx = 0, force_grouped_idx = -1; 2043 struct evsel *pos, *cur_leader = NULL; 2044 struct perf_evsel *cur_leaders_grp = NULL; 2045 bool idx_changed = false; 2046 int orig_num_leaders = 0, num_leaders = 0; 2047 int ret; 2048 struct evsel *force_grouped_leader = NULL; 2049 bool last_event_was_forced_leader = false; 2050 2051 /* 2052 * Compute index to insert ungrouped events at. Place them where the 2053 * first ungrouped event appears. 2054 */ 2055 list_for_each_entry(pos, list, core.node) { 2056 const struct evsel *pos_leader = evsel__leader(pos); 2057 2058 ret = evsel__compute_group_pmu_name(pos, list); 2059 if (ret) 2060 return ret; 2061 2062 if (pos == pos_leader) 2063 orig_num_leaders++; 2064 2065 /* 2066 * Ensure indexes are sequential, in particular for multiple 2067 * event lists being merged. The indexes are used to detect when 2068 * the user order is modified. 2069 */ 2070 pos->core.idx = idx++; 2071 2072 /* 2073 * Remember an index to sort all forced grouped events 2074 * together to. Use the group leader as some events 2075 * must appear first within the group. 2076 */ 2077 if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos)) 2078 force_grouped_idx = pos_leader->core.idx; 2079 } 2080 2081 /* Sort events. */ 2082 list_sort(&force_grouped_idx, list, evlist__cmp); 2083 2084 /* 2085 * Recompute groups, splitting for PMUs and adding groups for events 2086 * that require them. 2087 */ 2088 idx = 0; 2089 list_for_each_entry(pos, list, core.node) { 2090 const struct evsel *pos_leader = evsel__leader(pos); 2091 const char *pos_pmu_name = pos->group_pmu_name; 2092 const char *cur_leader_pmu_name; 2093 bool pos_force_grouped = force_grouped_idx != -1 && 2094 arch_evsel__must_be_in_group(pos); 2095 2096 /* Reset index and nr_members. */ 2097 if (pos->core.idx != idx) 2098 idx_changed = true; 2099 pos->core.idx = idx++; 2100 pos->core.nr_members = 0; 2101 2102 /* 2103 * Set the group leader respecting the given groupings and that 2104 * groups can't span PMUs. 2105 */ 2106 if (!cur_leader) { 2107 cur_leader = pos; 2108 cur_leaders_grp = &pos->core; 2109 if (pos_force_grouped) 2110 force_grouped_leader = pos; 2111 } 2112 2113 cur_leader_pmu_name = cur_leader->group_pmu_name; 2114 if (strcmp(cur_leader_pmu_name, pos_pmu_name)) { 2115 /* PMU changed so the group/leader must change. */ 2116 cur_leader = pos; 2117 cur_leaders_grp = pos->core.leader; 2118 if (pos_force_grouped && force_grouped_leader == NULL) 2119 force_grouped_leader = pos; 2120 } else if (cur_leaders_grp != pos->core.leader) { 2121 bool split_even_if_last_leader_was_forced = true; 2122 2123 /* 2124 * Event is for a different group. If the last event was 2125 * the forced group leader then subsequent group events 2126 * and forced events should be in the same group. If 2127 * there are no other forced group events then the 2128 * forced group leader wasn't really being forced into a 2129 * group, it just set arch_evsel__must_be_in_group, and 2130 * we don't want the group to split here. 2131 */ 2132 if (force_grouped_idx != -1 && last_event_was_forced_leader) { 2133 struct evsel *pos2 = pos; 2134 /* 2135 * Search the whole list as the group leaders 2136 * aren't currently valid. 2137 */ 2138 list_for_each_entry_continue(pos2, list, core.node) { 2139 if (pos->core.leader == pos2->core.leader && 2140 arch_evsel__must_be_in_group(pos2)) { 2141 split_even_if_last_leader_was_forced = false; 2142 break; 2143 } 2144 } 2145 } 2146 if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) { 2147 if (pos_force_grouped) { 2148 if (force_grouped_leader) { 2149 cur_leader = force_grouped_leader; 2150 cur_leaders_grp = force_grouped_leader->core.leader; 2151 } else { 2152 cur_leader = force_grouped_leader = pos; 2153 cur_leaders_grp = &pos->core; 2154 } 2155 } else { 2156 cur_leader = pos; 2157 cur_leaders_grp = pos->core.leader; 2158 } 2159 } 2160 } 2161 if (pos_leader != cur_leader) { 2162 /* The leader changed so update it. */ 2163 evsel__set_leader(pos, cur_leader); 2164 } 2165 last_event_was_forced_leader = (force_grouped_leader == pos); 2166 } 2167 list_for_each_entry(pos, list, core.node) { 2168 struct evsel *pos_leader = evsel__leader(pos); 2169 2170 if (pos == pos_leader) 2171 num_leaders++; 2172 pos_leader->core.nr_members++; 2173 } 2174 return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0; 2175 } 2176 2177 int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter, 2178 struct parse_events_error *err, bool fake_pmu, 2179 bool warn_if_reordered, bool fake_tp) 2180 { 2181 struct parse_events_state parse_state = { 2182 .list = LIST_HEAD_INIT(parse_state.list), 2183 .idx = evlist->core.nr_entries, 2184 .error = err, 2185 .stoken = PE_START_EVENTS, 2186 .fake_pmu = fake_pmu, 2187 .fake_tp = fake_tp, 2188 .pmu_filter = pmu_filter, 2189 .match_legacy_cache_terms = true, 2190 }; 2191 int ret, ret2; 2192 2193 ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state); 2194 2195 if (!ret && list_empty(&parse_state.list)) { 2196 WARN_ONCE(true, "WARNING: event parser found nothing\n"); 2197 return -1; 2198 } 2199 2200 ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list); 2201 if (ret2 < 0) 2202 return ret; 2203 2204 /* 2205 * Add list to the evlist even with errors to allow callers to clean up. 2206 */ 2207 evlist__splice_list_tail(evlist, &parse_state.list); 2208 2209 if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) { 2210 pr_warning("WARNING: events were regrouped to match PMUs\n"); 2211 2212 if (verbose > 0) { 2213 struct strbuf sb = STRBUF_INIT; 2214 2215 evlist__uniquify_name(evlist); 2216 evlist__format_evsels(evlist, &sb, 2048); 2217 pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf); 2218 strbuf_release(&sb); 2219 } 2220 } 2221 if (!ret) { 2222 struct evsel *last; 2223 2224 last = evlist__last(evlist); 2225 last->cmdline_group_boundary = true; 2226 2227 return 0; 2228 } 2229 2230 /* 2231 * There are 2 users - builtin-record and builtin-test objects. 2232 * Both call evlist__delete in case of error, so we dont 2233 * need to bother. 2234 */ 2235 return ret; 2236 } 2237 2238 int parse_event(struct evlist *evlist, const char *str) 2239 { 2240 struct parse_events_error err; 2241 int ret; 2242 2243 parse_events_error__init(&err); 2244 ret = parse_events(evlist, str, &err); 2245 parse_events_error__exit(&err); 2246 return ret; 2247 } 2248 2249 struct parse_events_error_entry { 2250 /** @list: The list the error is part of. */ 2251 struct list_head list; 2252 /** @idx: index in the parsed string */ 2253 int idx; 2254 /** @str: string to display at the index */ 2255 char *str; 2256 /** @help: optional help string */ 2257 char *help; 2258 }; 2259 2260 void parse_events_error__init(struct parse_events_error *err) 2261 { 2262 INIT_LIST_HEAD(&err->list); 2263 } 2264 2265 void parse_events_error__exit(struct parse_events_error *err) 2266 { 2267 struct parse_events_error_entry *pos, *tmp; 2268 2269 list_for_each_entry_safe(pos, tmp, &err->list, list) { 2270 zfree(&pos->str); 2271 zfree(&pos->help); 2272 list_del_init(&pos->list); 2273 free(pos); 2274 } 2275 } 2276 2277 void parse_events_error__handle(struct parse_events_error *err, int idx, 2278 char *str, char *help) 2279 { 2280 struct parse_events_error_entry *entry; 2281 2282 if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n")) 2283 goto out_free; 2284 2285 entry = zalloc(sizeof(*entry)); 2286 if (!entry) { 2287 pr_err("Failed to allocate memory for event parsing error: %s (%s)\n", 2288 str, help ?: "<no help>"); 2289 goto out_free; 2290 } 2291 entry->idx = idx; 2292 entry->str = str; 2293 entry->help = help; 2294 list_add(&entry->list, &err->list); 2295 return; 2296 out_free: 2297 free(str); 2298 free(help); 2299 } 2300 2301 #define MAX_WIDTH 1000 2302 static int get_term_width(void) 2303 { 2304 struct winsize ws; 2305 2306 get_term_dimensions(&ws); 2307 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col; 2308 } 2309 2310 static void __parse_events_error__print(int err_idx, const char *err_str, 2311 const char *err_help, const char *event) 2312 { 2313 const char *str = "invalid or unsupported event: "; 2314 char _buf[MAX_WIDTH]; 2315 char *buf = (char *) event; 2316 int idx = 0; 2317 if (err_str) { 2318 /* -2 for extra '' in the final fprintf */ 2319 int width = get_term_width() - 2; 2320 int len_event = strlen(event); 2321 int len_str, max_len, cut = 0; 2322 2323 /* 2324 * Maximum error index indent, we will cut 2325 * the event string if it's bigger. 2326 */ 2327 int max_err_idx = 13; 2328 2329 /* 2330 * Let's be specific with the message when 2331 * we have the precise error. 2332 */ 2333 str = "event syntax error: "; 2334 len_str = strlen(str); 2335 max_len = width - len_str; 2336 2337 buf = _buf; 2338 2339 /* We're cutting from the beginning. */ 2340 if (err_idx > max_err_idx) 2341 cut = err_idx - max_err_idx; 2342 2343 strncpy(buf, event + cut, max_len); 2344 2345 /* Mark cut parts with '..' on both sides. */ 2346 if (cut) 2347 buf[0] = buf[1] = '.'; 2348 2349 if ((len_event - cut) > max_len) { 2350 buf[max_len - 1] = buf[max_len - 2] = '.'; 2351 buf[max_len] = 0; 2352 } 2353 2354 idx = len_str + err_idx - cut; 2355 } 2356 2357 fprintf(stderr, "%s'%s'\n", str, buf); 2358 if (idx) { 2359 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str); 2360 if (err_help) 2361 fprintf(stderr, "\n%s\n", err_help); 2362 } 2363 } 2364 2365 void parse_events_error__print(const struct parse_events_error *err, 2366 const char *event) 2367 { 2368 struct parse_events_error_entry *pos; 2369 bool first = true; 2370 2371 list_for_each_entry(pos, &err->list, list) { 2372 if (!first) 2373 fputs("\n", stderr); 2374 __parse_events_error__print(pos->idx, pos->str, pos->help, event); 2375 first = false; 2376 } 2377 } 2378 2379 /* 2380 * In the list of errors err, do any of the error strings (str) contain the 2381 * given needle string? 2382 */ 2383 bool parse_events_error__contains(const struct parse_events_error *err, 2384 const char *needle) 2385 { 2386 struct parse_events_error_entry *pos; 2387 2388 list_for_each_entry(pos, &err->list, list) { 2389 if (strstr(pos->str, needle) != NULL) 2390 return true; 2391 } 2392 return false; 2393 } 2394 2395 #undef MAX_WIDTH 2396 2397 int parse_events_option(const struct option *opt, const char *str, 2398 int unset __maybe_unused) 2399 { 2400 struct parse_events_option_args *args = opt->value; 2401 struct parse_events_error err; 2402 int ret; 2403 2404 parse_events_error__init(&err); 2405 ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err, 2406 /*fake_pmu=*/false, /*warn_if_reordered=*/true, 2407 /*fake_tp=*/false); 2408 2409 if (ret) { 2410 parse_events_error__print(&err, str); 2411 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 2412 } 2413 parse_events_error__exit(&err); 2414 2415 return ret; 2416 } 2417 2418 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset) 2419 { 2420 struct parse_events_option_args *args = opt->value; 2421 int ret; 2422 2423 if (*args->evlistp == NULL) { 2424 *args->evlistp = evlist__new(); 2425 2426 if (*args->evlistp == NULL) { 2427 fprintf(stderr, "Not enough memory to create evlist\n"); 2428 return -1; 2429 } 2430 } 2431 ret = parse_events_option(opt, str, unset); 2432 if (ret) { 2433 evlist__delete(*args->evlistp); 2434 *args->evlistp = NULL; 2435 } 2436 2437 return ret; 2438 } 2439 2440 static int 2441 foreach_evsel_in_last_glob(struct evlist *evlist, 2442 int (*func)(struct evsel *evsel, 2443 const void *arg), 2444 const void *arg) 2445 { 2446 struct evsel *last = NULL; 2447 int err; 2448 2449 /* 2450 * Don't return when list_empty, give func a chance to report 2451 * error when it found last == NULL. 2452 * 2453 * So no need to WARN here, let *func do this. 2454 */ 2455 if (evlist->core.nr_entries > 0) 2456 last = evlist__last(evlist); 2457 2458 do { 2459 err = (*func)(last, arg); 2460 if (err) 2461 return -1; 2462 if (!last) 2463 return 0; 2464 2465 if (last->core.node.prev == &evlist->core.entries) 2466 return 0; 2467 last = list_entry(last->core.node.prev, struct evsel, core.node); 2468 } while (!last->cmdline_group_boundary); 2469 2470 return 0; 2471 } 2472 2473 static int set_filter(struct evsel *evsel, const void *arg) 2474 { 2475 const char *str = arg; 2476 bool found = false; 2477 int nr_addr_filters = 0; 2478 struct perf_pmu *pmu = NULL; 2479 2480 if (evsel == NULL) { 2481 fprintf(stderr, 2482 "--filter option should follow a -e tracepoint or HW tracer option\n"); 2483 return -1; 2484 } 2485 2486 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 2487 if (evsel__append_tp_filter(evsel, str) < 0) { 2488 fprintf(stderr, 2489 "not enough memory to hold filter string\n"); 2490 return -1; 2491 } 2492 2493 return 0; 2494 } 2495 2496 while ((pmu = perf_pmus__scan(pmu)) != NULL) 2497 if (pmu->type == evsel->core.attr.type) { 2498 found = true; 2499 break; 2500 } 2501 2502 if (found) 2503 perf_pmu__scan_file(pmu, "nr_addr_filters", 2504 "%d", &nr_addr_filters); 2505 2506 if (!nr_addr_filters) 2507 return perf_bpf_filter__parse(&evsel->bpf_filters, str); 2508 2509 if (evsel__append_addr_filter(evsel, str) < 0) { 2510 fprintf(stderr, 2511 "not enough memory to hold filter string\n"); 2512 return -1; 2513 } 2514 2515 return 0; 2516 } 2517 2518 int parse_filter(const struct option *opt, const char *str, 2519 int unset __maybe_unused) 2520 { 2521 struct evlist *evlist = *(struct evlist **)opt->value; 2522 2523 return foreach_evsel_in_last_glob(evlist, set_filter, 2524 (const void *)str); 2525 } 2526 2527 static int add_exclude_perf_filter(struct evsel *evsel, 2528 const void *arg __maybe_unused) 2529 { 2530 char new_filter[64]; 2531 2532 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2533 fprintf(stderr, 2534 "--exclude-perf option should follow a -e tracepoint option\n"); 2535 return -1; 2536 } 2537 2538 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); 2539 2540 if (evsel__append_tp_filter(evsel, new_filter) < 0) { 2541 fprintf(stderr, 2542 "not enough memory to hold filter string\n"); 2543 return -1; 2544 } 2545 2546 return 0; 2547 } 2548 2549 int exclude_perf(const struct option *opt, 2550 const char *arg __maybe_unused, 2551 int unset __maybe_unused) 2552 { 2553 struct evlist *evlist = *(struct evlist **)opt->value; 2554 2555 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, 2556 NULL); 2557 } 2558 2559 int parse_events__is_hardcoded_term(struct parse_events_term *term) 2560 { 2561 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 2562 } 2563 2564 static int new_term(struct parse_events_term **_term, 2565 struct parse_events_term *temp, 2566 char *str, u64 num) 2567 { 2568 struct parse_events_term *term; 2569 2570 term = malloc(sizeof(*term)); 2571 if (!term) 2572 return -ENOMEM; 2573 2574 *term = *temp; 2575 INIT_LIST_HEAD(&term->list); 2576 term->weak = false; 2577 2578 switch (term->type_val) { 2579 case PARSE_EVENTS__TERM_TYPE_NUM: 2580 term->val.num = num; 2581 break; 2582 case PARSE_EVENTS__TERM_TYPE_STR: 2583 term->val.str = str; 2584 break; 2585 default: 2586 free(term); 2587 return -EINVAL; 2588 } 2589 2590 *_term = term; 2591 return 0; 2592 } 2593 2594 int parse_events_term__num(struct parse_events_term **term, 2595 enum parse_events__term_type type_term, 2596 const char *config, u64 num, 2597 bool no_value, 2598 void *loc_term_, void *loc_val_) 2599 { 2600 YYLTYPE *loc_term = loc_term_; 2601 YYLTYPE *loc_val = loc_val_; 2602 2603 struct parse_events_term temp = { 2604 .type_val = PARSE_EVENTS__TERM_TYPE_NUM, 2605 .type_term = type_term, 2606 .config = config ? : strdup(parse_events__term_type_str(type_term)), 2607 .no_value = no_value, 2608 .err_term = loc_term ? loc_term->first_column : 0, 2609 .err_val = loc_val ? loc_val->first_column : 0, 2610 }; 2611 2612 return new_term(term, &temp, /*str=*/NULL, num); 2613 } 2614 2615 int parse_events_term__str(struct parse_events_term **term, 2616 enum parse_events__term_type type_term, 2617 char *config, char *str, 2618 void *loc_term_, void *loc_val_) 2619 { 2620 YYLTYPE *loc_term = loc_term_; 2621 YYLTYPE *loc_val = loc_val_; 2622 2623 struct parse_events_term temp = { 2624 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2625 .type_term = type_term, 2626 .config = config, 2627 .err_term = loc_term ? loc_term->first_column : 0, 2628 .err_val = loc_val ? loc_val->first_column : 0, 2629 }; 2630 2631 return new_term(term, &temp, str, /*num=*/0); 2632 } 2633 2634 int parse_events_term__term(struct parse_events_term **term, 2635 enum parse_events__term_type term_lhs, 2636 enum parse_events__term_type term_rhs, 2637 void *loc_term, void *loc_val) 2638 { 2639 return parse_events_term__str(term, term_lhs, NULL, 2640 strdup(parse_events__term_type_str(term_rhs)), 2641 loc_term, loc_val); 2642 } 2643 2644 int parse_events_term__clone(struct parse_events_term **new, 2645 const struct parse_events_term *term) 2646 { 2647 char *str; 2648 struct parse_events_term temp = *term; 2649 2650 temp.used = false; 2651 if (term->config) { 2652 temp.config = strdup(term->config); 2653 if (!temp.config) 2654 return -ENOMEM; 2655 } 2656 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) 2657 return new_term(new, &temp, /*str=*/NULL, term->val.num); 2658 2659 str = strdup(term->val.str); 2660 if (!str) { 2661 zfree(&temp.config); 2662 return -ENOMEM; 2663 } 2664 return new_term(new, &temp, str, /*num=*/0); 2665 } 2666 2667 void parse_events_term__delete(struct parse_events_term *term) 2668 { 2669 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) 2670 zfree(&term->val.str); 2671 2672 zfree(&term->config); 2673 free(term); 2674 } 2675 2676 static int parse_events_terms__copy(const struct parse_events_terms *src, 2677 struct parse_events_terms *dest) 2678 { 2679 struct parse_events_term *term; 2680 2681 list_for_each_entry (term, &src->terms, list) { 2682 struct parse_events_term *n; 2683 int ret; 2684 2685 ret = parse_events_term__clone(&n, term); 2686 if (ret) 2687 return ret; 2688 2689 list_add_tail(&n->list, &dest->terms); 2690 } 2691 return 0; 2692 } 2693 2694 void parse_events_terms__init(struct parse_events_terms *terms) 2695 { 2696 INIT_LIST_HEAD(&terms->terms); 2697 } 2698 2699 void parse_events_terms__exit(struct parse_events_terms *terms) 2700 { 2701 struct parse_events_term *term, *h; 2702 2703 list_for_each_entry_safe(term, h, &terms->terms, list) { 2704 list_del_init(&term->list); 2705 parse_events_term__delete(term); 2706 } 2707 } 2708 2709 void parse_events_terms__delete(struct parse_events_terms *terms) 2710 { 2711 if (!terms) 2712 return; 2713 parse_events_terms__exit(terms); 2714 free(terms); 2715 } 2716 2717 int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb) 2718 { 2719 struct parse_events_term *term; 2720 bool first = true; 2721 2722 if (!terms) 2723 return 0; 2724 2725 list_for_each_entry(term, &terms->terms, list) { 2726 int ret; 2727 2728 if (!first) { 2729 ret = strbuf_addch(sb, ','); 2730 if (ret < 0) 2731 return ret; 2732 } 2733 first = false; 2734 2735 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) 2736 if (term->no_value) { 2737 assert(term->val.num == 1); 2738 ret = strbuf_addf(sb, "%s", term->config); 2739 } else 2740 ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num); 2741 else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) { 2742 if (term->config) { 2743 ret = strbuf_addf(sb, "%s=", term->config); 2744 if (ret < 0) 2745 return ret; 2746 } else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) { 2747 ret = strbuf_addf(sb, "%s=", 2748 parse_events__term_type_str(term->type_term)); 2749 if (ret < 0) 2750 return ret; 2751 } 2752 assert(!term->no_value); 2753 ret = strbuf_addf(sb, "%s", term->val.str); 2754 } 2755 if (ret < 0) 2756 return ret; 2757 } 2758 return 0; 2759 } 2760 2761 static void config_terms_list(char *buf, size_t buf_sz) 2762 { 2763 int i; 2764 bool first = true; 2765 2766 buf[0] = '\0'; 2767 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) { 2768 const char *name = parse_events__term_type_str(i); 2769 2770 if (!config_term_avail(i, NULL)) 2771 continue; 2772 if (!name) 2773 continue; 2774 if (name[0] == '<') 2775 continue; 2776 2777 if (strlen(buf) + strlen(name) + 2 >= buf_sz) 2778 return; 2779 2780 if (!first) 2781 strcat(buf, ","); 2782 else 2783 first = false; 2784 strcat(buf, name); 2785 } 2786 } 2787 2788 /* 2789 * Return string contains valid config terms of an event. 2790 * @additional_terms: For terms such as PMU sysfs terms. 2791 */ 2792 char *parse_events_formats_error_string(char *additional_terms) 2793 { 2794 char *str; 2795 /* "no-overwrite" is the longest name */ 2796 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR * 2797 (sizeof("no-overwrite") - 1)]; 2798 2799 config_terms_list(static_terms, sizeof(static_terms)); 2800 /* valid terms */ 2801 if (additional_terms) { 2802 if (asprintf(&str, "valid terms: %s,%s", 2803 additional_terms, static_terms) < 0) 2804 goto fail; 2805 } else { 2806 if (asprintf(&str, "valid terms: %s", static_terms) < 0) 2807 goto fail; 2808 } 2809 return str; 2810 2811 fail: 2812 return NULL; 2813 } 2814