// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "term.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/util.h"
#include "tracepoint.h"

#define MAX_NAME_LEN 100

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest);

const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias  = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias  = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias  = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias  = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias  = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias  = "",
	},
};

const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias  = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias  = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias  = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias  = "",
	},
	[PERF_COUNT_SW_BPF_OUTPUT] = {
		.symbol = "bpf-output",
		.alias  = "",
	},
	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
		.symbol = "cgroup-switches",
		.alias  = "",
	},
};
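/*
 * Illustrative note (editor's addition, not kernel-doc): "perf stat -e cycles"
 * resolves through event_symbols_hw above, where "cycles" is the alias of
 * PERF_COUNT_HW_CPU_CYCLES, producing a perf_event_attr with
 * type = PERF_TYPE_HARDWARE and config = PERF_COUNT_HW_CPU_CYCLES.
 */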
const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";

	case PERF_TYPE_SOFTWARE:
		return "software";

	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";

	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";

	default:
		break;
	}

	return "unknown";
}

static char *get_config_str(const struct parse_events_terms *head_terms,
			    enum parse_events__term_type type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

/**
 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
 *           matches the raw's string value. If the string value matches an
 *           event then change the term to be an event, if not then change it
 *           to be a config term. For example, "read" may be an event of the
 *           PMU or a raw hex encoding of 0xead. The fix-up is done late so the
 *           PMU of the event can be determined and we don't need to scan all
 *           PMUs ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &config_terms->terms, list) {
		u64 num;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
			continue;

		if (perf_pmu__have_event(pmu, term->val.str)) {
			zfree(&term->config);
			term->config = term->val.str;
			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->val.num = 1;
			term->no_value = true;
			continue;
		}

		zfree(&term->config);
		term->config = strdup("config");
		errno = 0;
		num = strtoull(term->val.str + 1, NULL, 16);
		assert(errno == 0);
		free(term->val.str);
		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
		term->val.num = num;
		term->no_value = false;
	}
}

static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, bool auto_merge_stats,
	    struct perf_cpu_map *cpu_list, u64 alternate_hw_config)
{
	struct evsel *evsel;
	struct perf_cpu_map *cpus = perf_cpu_map__is_empty(cpu_list) && pmu ?
				    pmu->cpus : cpu_list;

	cpus = perf_cpu_map__get(cpus);
	if (pmu)
		perf_pmu__warn_invalid_formats(pmu);

	if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) {
		perf_pmu__warn_invalid_config(pmu, attr->config, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
		perf_pmu__warn_invalid_config(pmu, attr->config1, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
		perf_pmu__warn_invalid_config(pmu, attr->config2, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
		perf_pmu__warn_invalid_config(pmu, attr->config3, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
	}
	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel) {
		perf_cpu_map__put(cpus);
		return NULL;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.own_cpus = perf_cpu_map__get(cpus);
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
	evsel->auto_merge_stats = auto_merge_stats;
	evsel->pmu = pmu;
	evsel->alternate_hw_config = alternate_hw_config;

	if (name)
		evsel->name = strdup(name);

	if (metric_id)
		evsel->metric_id = strdup(metric_id);

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	return evsel;
}
struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
			   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms,
		     u64 alternate_hw_config)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
			   alternate_hw_config) ? 0 : -ENOMEM;
}

/**
 * parse_aliases - search names for entries beginning or equalling str ignoring
 *                 case. If multiple entries in names match str then the
 *                 longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}
typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}
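/*
 * Worked example (editor's addition): "L1-dcache-load-misses" decodes as
 * cache_type = PERF_COUNT_HW_CACHE_L1D (0), cache_op =
 * PERF_COUNT_HW_CACHE_OP_READ (0) via the "load" alias, and cache_result =
 * PERF_COUNT_HW_CACHE_RESULT_MISS (1), so *config = 0 | (0 << 8) | (1 << 16).
 */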
/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				bool auto_merge_stats, u64 alternate_hw_config);

int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
			   struct parse_events_state *parse_state,
			   struct parse_events_terms *parsed_terms)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;
	const char *config_name = get_config_name(parsed_terms);
	const char *metric_id = get_config_metric_id(parsed_terms);

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		LIST_HEAD(config_terms);
		struct perf_event_attr attr;
		int ret;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (perf_pmu__have_event(pmu, name)) {
			/*
			 * The PMU has the event so add as not a legacy cache
			 * event.
			 */
			ret = parse_events_add_pmu(parse_state, list, pmu,
						   parsed_terms,
						   perf_pmu__auto_merge_stats(pmu),
						   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
			if (ret)
				return ret;
			continue;
		}

		if (!pmu->is_core) {
			/* Legacy cache events are only supported by core PMUs. */
			continue;
		}

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HW_CACHE;

		ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
		if (ret)
			return ret;

		found_supported = true;

		if (parsed_terms) {
			if (config_attr(&attr, parsed_terms, parse_state->error,
					config_term_common))
				return -EINVAL;

			if (get_config_terms(parsed_terms, &config_terms))
				return -ENOMEM;
		}

		if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
				metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
				/*cpu_list=*/NULL,
				/*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
			return -ENOMEM;

		free_config_terms(&config_terms);
	}
	return found_supported ? 0 : -EINVAL;
}
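/*
 * Editor's note: parse_events_add_cache() prefers a PMU's own event over the
 * legacy encoding. If a PMU exports an event literally named
 * "L1-dcache-load-misses", that event is used as-is; otherwise the name is
 * decoded via parse_events__decode_legacy_cache() for each core PMU.
 */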
static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name, int column)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get error directly from syscall errno ( > 0),
	 * or from encoded pointer's error ( < 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, column, strdup(str), strdup(help));
}

static int add_tracepoint(struct parse_events_state *parse_state,
			  struct list_head *list,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
					       !parse_state->fake_tp);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
				      struct list_head *list,
				      const char *sys_name, const char *evt_name,
				      struct parse_events_error *err,
				      struct parse_events_terms *head_config, YYLTYPE *loc)
{
	char *evt_path;
	struct dirent *evt_ent;
	DIR *evt_dir;
	int ret = 0, found = 0;

	evt_path = get_events_file(sys_name);
	if (!evt_path) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}
	evt_dir = opendir(evt_path);
	if (!evt_dir) {
		put_events_file(evt_path);
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (evt_ent = readdir(evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		found++;

		ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
				     err, head_config, loc);
	}

	if (!found) {
		tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
		ret = -1;
	}

	put_events_file(evt_path);
	closedir(evt_dir);
	return ret;
}
static int add_tracepoint_event(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys_name, const char *evt_name,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, YYLTYPE *loc)
{
	return strpbrk(evt_name, "*?") ?
		add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
					   err, head_config, loc) :
		add_tracepoint(parse_state, list, sys_name, evt_name,
			       err, head_config, loc);
}

static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
				    struct list_head *list,
				    const char *sys_name, const char *evt_name,
				    struct parse_events_error *err,
				    struct parse_events_terms *head_config, YYLTYPE *loc)
{
	struct dirent *events_ent;
	DIR *events_dir;
	int ret = 0;

	events_dir = tracing_events__opendir();
	if (!events_dir) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (events_ent = readdir(events_dir))) {
		if (!strcmp(events_ent->d_name, ".")
		    || !strcmp(events_ent->d_name, "..")
		    || !strcmp(events_ent->d_name, "enable")
		    || !strcmp(events_ent->d_name, "header_event")
		    || !strcmp(events_ent->d_name, "header_page"))
			continue;

		if (!strglobmatch(events_ent->d_name, sys_name))
			continue;

		ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
					   evt_name, err, head_config, loc);
	}

	closedir(events_dir);
	return ret;
}

size_t default_breakpoint_len(void)
{
#if defined(__i386__)
	static int len;

	if (len == 0) {
		struct perf_env env = {};

		perf_env__init(&env);
		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
		perf_env__exit(&env);
	}
	return len;
#elif defined(__aarch64__)
	return 4;
#else
	return sizeof(long);
#endif
}

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
				struct list_head *list,
				u64 addr, char *type, u64 len,
				struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = default_breakpoint_len();
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);

	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
			 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}
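/*
 * Editor's note: for the "mem:" breakpoint event syntax (e.g. "mem:0x1000:rw"),
 * an unspecified access type defaults to read|write in parse_breakpoint_type(),
 * and an unspecified length defaults to the architecture breakpoint length for
 * execute breakpoints and to HW_BREAKPOINT_LEN_4 otherwise.
 */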
static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  enum parse_events__term_val_type type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					   type == PARSE_EVENTS__TERM_TYPE_NUM
					   ? strdup("expected numeric value")
					   : strdup("expected string value"),
					   NULL);
	}
	return -EINVAL;
}

static bool config_term_shrinked;

const char *parse_events__term_type_str(enum parse_events__term_type term_type)
{
	/*
	 * Update according to parse-events.l
	 */
	static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
		[PARSE_EVENTS__TERM_TYPE_USER]			= "<sysfs term>",
		[PARSE_EVENTS__TERM_TYPE_CONFIG]		= "config",
		[PARSE_EVENTS__TERM_TYPE_CONFIG1]		= "config1",
		[PARSE_EVENTS__TERM_TYPE_CONFIG2]		= "config2",
		[PARSE_EVENTS__TERM_TYPE_CONFIG3]		= "config3",
		[PARSE_EVENTS__TERM_TYPE_NAME]			= "name",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]		= "period",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]		= "freq",
		[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]	= "branch_type",
		[PARSE_EVENTS__TERM_TYPE_TIME]			= "time",
		[PARSE_EVENTS__TERM_TYPE_CALLGRAPH]		= "call-graph",
		[PARSE_EVENTS__TERM_TYPE_STACKSIZE]		= "stack-size",
		[PARSE_EVENTS__TERM_TYPE_NOINHERIT]		= "no-inherit",
		[PARSE_EVENTS__TERM_TYPE_INHERIT]		= "inherit",
		[PARSE_EVENTS__TERM_TYPE_MAX_STACK]		= "max-stack",
		[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]		= "nr",
		[PARSE_EVENTS__TERM_TYPE_OVERWRITE]		= "overwrite",
		[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]		= "no-overwrite",
		[PARSE_EVENTS__TERM_TYPE_DRV_CFG]		= "driver-config",
		[PARSE_EVENTS__TERM_TYPE_PERCORE]		= "percore",
		[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]		= "aux-output",
		[PARSE_EVENTS__TERM_TYPE_AUX_ACTION]		= "aux-action",
		[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]	= "aux-sample-size",
		[PARSE_EVENTS__TERM_TYPE_METRIC_ID]		= "metric-id",
		[PARSE_EVENTS__TERM_TYPE_RAW]			= "raw",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE]		= "legacy-cache",
		[PARSE_EVENTS__TERM_TYPE_HARDWARE]		= "hardware",
	};
	if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
		return "unknown term";

	return config_term_names[term_type];
}

static bool
config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					   strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		return true;
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     parse_events__term_type_str(term_type)) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)						\
do {									\
	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
		return -EINVAL;						\
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(err, term->err_val,
						   strdup("invalid branch sample type"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_RAW:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(err, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		parse_events_error__handle(err, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after basic checking so
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If the availability were checked at the entry of this function, the
	 * user would see "'<sysfs term>' is not usable in 'perf stat'" when an
	 * invalid config term is provided for legacy events (for example,
	 * instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, err))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}
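/*
 * Editor's example: for "cpu/config=0x1,period=97/", config_term_common()
 * stores 0x1 into attr->config directly, while "period" only has its value
 * type checked here; the period itself is materialized later by
 * get_config_terms() as an EVSEL__CONFIG_TERM_PERIOD.
 */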
static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_error *err)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * Rewrite the PMU event to a legacy cache one unless the PMU
		 * doesn't support legacy cache events or the event is present
		 * within the PMU.
		 */
		if (perf_pmu__supports_legacy_cache(pmu) &&
		    !perf_pmu__have_event(pmu, term->config)) {
			attr->type = PERF_TYPE_HW_CACHE;
			return parse_events__decode_legacy_cache(term->config, pmu->type,
								 &attr->config);
		} else {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
		}
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * If the PMU has a sysfs or json event prefer it over
		 * legacy. ARM requires this.
		 */
		if (perf_pmu__have_event(pmu, term->config)) {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
			term->alternate_hw_config = true;
		} else {
			attr->type = PERF_TYPE_HARDWARE;
			attr->config = term->val.num;
			if (perf_pmus__supports_extended_type())
				attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
		}
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, err);
}

static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
						   strdup(parse_events__term_type_str(term->type_term)),
						   strdup("valid terms: call-graph,stack-size\n"));
		}
		return -EINVAL;
	}

	return 0;
}

static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &head->terms, list)
		if (config_term(attr, term, err))
			return -EINVAL;

	return 0;
}
static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
			ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_USER:
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		default:
			break;
		}
	}
	return 0;
}
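/*
 * Editor's example: parsing "instructions/period=1000,inherit/" produces an
 * EVSEL__CONFIG_TERM_PERIOD with val.period = 1000 and an
 * EVSEL__CONFIG_TERM_INHERIT with val.inherit = 1 on the returned list
 * (a valueless boolean term parses with val.num = 1).
 */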
/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;

	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(parse_state, list, sys, event,
						err, head_config, loc);
	else
		return add_tracepoint_event(parse_state, list, sys, event,
					    err, head_config, loc);
}
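/*
 * Editor's note: "sched:sched_switch" reaches add_tracepoint() directly,
 * while globs fan out: "sched:sched_*" scans the sched events directory and
 * "*:sched_switch" scans every events subsystem directory.
 */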
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				      struct list_head *list,
				      struct perf_pmu *pmu, u32 type, u32 extended_type,
				      u64 config, const struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
			  metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
			  /*cpu_list=*/NULL, /*alternate_hw_config=*/PERF_COUNT_HW_MAX
			  ) == NULL ? -ENOMEM : 0;
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     const struct parse_events_terms *head_config,
			     bool wildcard)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;

	/* Wildcards on numeric values are only supported by core PMUs. */
	if (wildcard && perf_pmus__supports_extended_type()) {
		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			int ret;

			found_supported = true;
			if (parse_events__filter_pmu(parse_state, pmu))
				continue;

			ret = __parse_events_add_numeric(parse_state, list, pmu,
							 type, pmu->type,
							 config, head_config);
			if (ret)
				return ret;
		}
		if (found_supported)
			return 0;
	}
	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
					  type, /*extended_type=*/0, config, head_config);
}

static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}
static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				bool auto_merge_stats, u64 alternate_hw_config)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);
	struct parse_events_terms parsed_terms;
	bool alias_rewrote_terms = false;

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		if (pmu->selectable && const_parsed_terms &&
		    list_empty(&const_parsed_terms->terms)) {
			strbuf_addf(&sb, "%s//", pmu->name);
		} else {
			strbuf_addf(&sb, "%s/", pmu->name);
			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
			strbuf_addch(&sb, '/');
		}
		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
		strbuf_release(&sb);
	}

	memset(&attr, 0, sizeof(attr));
	if (pmu->perf_event_attr_init_default)
		pmu->perf_event_attr_init_default(pmu, &attr);

	attr.type = pmu->type;

	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, auto_merge_stats,
				    /*cpu_list=*/NULL, alternate_hw_config);
		return evsel ? 0 : -ENOMEM;
	}

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}
	fix_raw(&parsed_terms, pmu);

	/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
	if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	/* Look for event names in the terms and rewrite into format based terms. */
	if (perf_pmu__check_alias(pmu, &parsed_terms,
				  &info, &alias_rewrote_terms,
				  &alternate_hw_config, err)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		parse_events_terms__to_strbuf(&parsed_terms, &sb);
		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
		strbuf_release(&sb);
	}

	/* Configure attr/terms again if an alias was expanded. */
	if (alias_rewrote_terms &&
	    config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (get_config_terms(&parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->perf_event_attr_init_default &&
	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/* Skip configuring hard coded terms that were applied by config_attr. */
	if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
			     parse_state->error)) {
		free_config_terms(&config_terms);
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(&parsed_terms),
			    get_config_metric_id(&parsed_terms), pmu,
			    &config_terms, auto_merge_stats, /*cpu_list=*/NULL,
			    alternate_hw_config);
	if (!evsel) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->percore = config_term_percore(&evsel->config_terms);

	parse_events_terms__exit(&parsed_terms);
	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	return 0;
}
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       const char *event_name, u64 hw_config,
			       const struct parse_events_terms *const_parsed_terms,
			       struct list_head **listp, void *loc_)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct perf_pmu *pmu = NULL;
	YYLTYPE *loc = loc_;
	int ok = 0;
	const char *config;
	struct parse_events_terms parsed_terms;

	*listp = NULL;

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}

	config = strdup(event_name);
	if (!config)
		goto out_err;

	if (parse_events_term__num(&term,
				   PARSE_EVENTS__TERM_TYPE_USER,
				   config, /*num=*/1, /*novalue=*/true,
				   loc, /*loc_val=*/NULL) < 0) {
		zfree(&config);
		goto out_err;
	}
	list_add_tail(&term->list, &parsed_terms.terms);

	/* Add it for all PMUs that support the alias */
	list = malloc(sizeof(struct list_head));
	if (!list)
		goto out_err;

	INIT_LIST_HEAD(list);

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		bool auto_merge_stats;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!perf_pmu__have_event(pmu, event_name))
			continue;

		auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
		if (!parse_events_add_pmu(parse_state, list, pmu,
					  &parsed_terms, auto_merge_stats, hw_config)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
	}

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
					  /*auto_merge_stats=*/true, hw_config)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
	}

out_err:
	parse_events_terms__exit(&parsed_terms);
	if (ok)
		*listp = list;
	else
		free(list);

	return ok ? 0 : -1;
}
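/*
 * Editor's note: parse_events_multi_pmu_add() is how a bare event name with
 * no PMU prefix becomes one evsel per PMU advertising that alias; the
 * wildcard term list built above is copied for each matching PMU by
 * parse_events_add_pmu().
 */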
int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
					  const char *event_or_pmu,
					  const struct parse_events_terms *const_parsed_terms,
					  struct list_head **listp,
					  void *loc_)
{
	YYLTYPE *loc = loc_;
	struct perf_pmu *pmu;
	int ok = 0;
	char *help;

	*listp = malloc(sizeof(**listp));
	if (!*listp)
		return -ENOMEM;

	INIT_LIST_HEAD(*listp);

	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
	pmu = perf_pmus__find(event_or_pmu);
	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
					 /*auto_merge_stats=*/false,
					 /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
		return 0;

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
					  const_parsed_terms,
					  /*auto_merge_stats=*/false,
					  /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
			return 0;
	}

	pmu = NULL;
	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!parse_events__filter_pmu(parse_state, pmu) &&
		    perf_pmu__match(pmu, event_or_pmu)) {
			bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);

			if (!parse_events_add_pmu(parse_state, *listp, pmu,
						  const_parsed_terms,
						  auto_merge_stats,
						  /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
				ok++;
				parse_state->wild_card_pmus = true;
			}
		}
	}
	if (ok)
		return 0;

	/* Failure to add, assume event_or_pmu is an event name. */
	zfree(listp);
	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
					const_parsed_terms, listp, loc))
		return 0;

	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
		help = NULL;
	parse_events_error__handle(parse_state->error, loc->first_column,
				   strdup("Bad event or PMU"),
				   help);
	zfree(listp);
	return -EINVAL;
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	leader = list_first_entry(list, struct evsel, core.node);
	__perf_evlist__set_leader(list, &leader->core);
	zfree(&leader->group_name);
	leader->group_name = name;
}
static int parse_events__modifier_list(struct parse_events_state *parse_state,
				       YYLTYPE *loc,
				       struct list_head *list,
				       struct parse_events_modifier mod,
				       bool group)
{
	struct evsel *evsel;

	if (!group && mod.weak) {
		parse_events_error__handle(parse_state->error, loc->first_column,
					   strdup("Weak modifier is for use with groups"), NULL);
		return -EINVAL;
	}

	__evlist__for_each_entry(list, evsel) {
		/* Translate modifiers into the equivalent evsel excludes. */
		int eu = group ? evsel->core.attr.exclude_user : 0;
		int ek = group ? evsel->core.attr.exclude_kernel : 0;
		int eh = group ? evsel->core.attr.exclude_hv : 0;
		int eH = group ? evsel->core.attr.exclude_host : 0;
		int eG = group ? evsel->core.attr.exclude_guest : 0;
		int exclude = eu | ek | eh;
		int exclude_GH = group ? evsel->exclude_GH : 0;

		if (mod.user) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			if (!exclude_GH && !perf_guest && exclude_GH_default)
				eG = 1;
			eu = 0;
		}
		if (mod.kernel) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		}
		if (mod.hypervisor) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		}
		if (mod.guest) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		}
		if (mod.host) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		}
		evsel->core.attr.exclude_user = eu;
		evsel->core.attr.exclude_kernel = ek;
		evsel->core.attr.exclude_hv = eh;
		evsel->core.attr.exclude_host = eH;
		evsel->core.attr.exclude_guest = eG;
		evsel->exclude_GH = exclude_GH;

		/* Simple modifiers copied to the evsel. */
		if (mod.precise) {
			u8 precise = evsel->core.attr.precise_ip + mod.precise;
			/*
			 * precise ip:
			 *
			 *  0 - SAMPLE_IP can have arbitrary skid
			 *  1 - SAMPLE_IP must have constant skid
			 *  2 - SAMPLE_IP requested to have 0 skid
			 *  3 - SAMPLE_IP must have 0 skid
			 *
			 * See also PERF_RECORD_MISC_EXACT_IP
			 */
			if (precise > 3) {
				char *help;

				if (asprintf(&help,
					     "Maximum combined precise value is 3, adding precision to \"%s\"",
					     evsel__name(evsel)) > 0) {
					parse_events_error__handle(parse_state->error,
								   loc->first_column,
								   help, NULL);
				}
				return -EINVAL;
			}
			evsel->core.attr.precise_ip = precise;
		}
		if (mod.precise_max)
			evsel->precise_max = 1;
		if (mod.non_idle)
			evsel->core.attr.exclude_idle = 1;
		if (mod.sample_read)
			evsel->sample_read = 1;
		if (mod.pinned && evsel__is_group_leader(evsel))
			evsel->core.attr.pinned = 1;
		if (mod.exclusive && evsel__is_group_leader(evsel))
			evsel->core.attr.exclusive = 1;
		if (mod.weak)
			evsel->weak_group = true;
		if (mod.bpf)
			evsel->bpf_counter = true;
		if (mod.retire_lat)
			evsel->retire_lat = true;
	}
	return 0;
}

int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}

int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}
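/*
 * Editor's example: "cycles:u" sets exclude_kernel and exclude_hv, leaving
 * only user space counted, and each "p" bumps precise_ip, so "cycles:pp"
 * requests precise_ip = 2; a combined precision above 3 is rejected above.
 */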
int parse_events__set_default_name(struct list_head *list, char *name)
{
	struct evsel *evsel;
	bool used_name = false;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name) {
			evsel->name = used_name ? strdup(name) : name;
			used_name = true;
			if (!evsel->name)
				return -ENOMEM;
		}
	}
	if (!used_name)
		free(name);
	return 0;
}

static int parse_events__scanner(const char *str,
				 FILE *input,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	if (str)
		buffer = parse_events__scan_string(str, scanner);
	else
		parse_events_set_in(input, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	if (str) {
		parse_events__flush_buffer(buffer, scanner);
		parse_events__delete_buffer(buffer, scanner);
	}
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * Parse an event config string and return a list of event terms.
 */
int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
{
	struct parse_events_state parse_state = {
		.terms  = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, input, &parse_state);
	if (!ret)
		list_splice(&parse_state.terms->terms, &terms->terms);

	zfree(&parse_state.terms);
	return ret;
}

static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used as
		 * the list isn't yet sorted to put evsels in the same group
		 * together.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Record computed name. */
	evsel->group_pmu_name = strdup(group_pmu_name);
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}
	 * evsel is part of a group; by default ungrouped events will be
	 * sorted relative to grouped events based on where the first
	 * ungrouped event occurs. If neither event has a group we want to
	 * fall through to the arch specific sorting, which can reorder and
	 * fix things like Intel's topdown events.
	 */
	if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
		lhs_has_group = true;
		lhs_sort_idx = lhs_core->leader->idx;
	} else {
		lhs_has_group = false;
		lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
			? *force_grouped_idx
			: lhs_core->idx;
	}
	if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
		rhs_has_group = true;
		rhs_sort_idx = rhs_core->leader->idx;
	} else {
		rhs_has_group = false;
		rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
			? *force_grouped_idx
			: rhs_core->idx;
	}

	if (lhs_sort_idx != rhs_sort_idx)
		return lhs_sort_idx - rhs_sort_idx;

	/* Group by PMU if there is a group. Groups can't span PMUs. */
	if (lhs_has_group && rhs_has_group) {
		lhs_pmu_name = lhs->group_pmu_name;
		rhs_pmu_name = rhs->group_pmu_name;
		ret = strcmp(lhs_pmu_name, rhs_pmu_name);
		if (ret)
			return ret;
	}

	/* Architecture specific sorting. */
	return arch_evlist__cmp(lhs, rhs);
}

static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, force_grouped_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false, cur_leader_force_grouped = false;
	int orig_num_leaders = 0, num_leaders = 0;
	int ret;

	/*
	 * Compute index to insert ungrouped events at. Place them where the
	 * first ungrouped event appears.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		ret = evsel__compute_group_pmu_name(pos, list);
		if (ret)
			return ret;

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect when
		 * the user order is modified.
		 */
		pos->core.idx = idx++;

		/* Remember an index to sort all forced grouped events together to. */
		if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
		    arch_evsel__must_be_in_group(pos))
			force_grouped_idx = pos->core.idx;
	}

	/* Sort events. */
	list_sort(&force_grouped_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = pos->group_pmu_name;
		const char *cur_leader_pmu_name;
		bool pos_force_grouped = force_grouped_idx != -1 &&
					 arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader respecting the given groupings and that
		 * groups can't span PMUs.
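		 *
		 * For example, a command line group such as "{cycles,data_read}"
		 * that mixes a core event with an uncore event is split here so
		 * that each PMU ends up with its own leader (event names are
		 * illustrative; whether data_read is uncore depends on the
		 * machine).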
		 */
		if (!cur_leader)
			cur_leader = pos;

		cur_leader_pmu_name = cur_leader->group_pmu_name;
		if ((cur_leaders_grp != pos->core.leader &&
		     (!pos_force_grouped || !cur_leader_force_grouped)) ||
		    strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* Event is for a different group/PMU than last. */
			cur_leader = pos;
			/*
			 * Remember the leader's group before it is overwritten,
			 * so that later events match as being in the same
			 * group.
			 */
			cur_leaders_grp = pos->core.leader;
			/*
			 * Avoid forcing events into groups with events that
			 * don't need to be in the group.
			 */
			cur_leader_force_grouped = pos_force_grouped;
		}
		if (pos_leader != cur_leader) {
			/* The leader changed so update it. */
			evsel__set_leader(pos, cur_leader);
		}
	}
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}

int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
		   struct parse_events_error *err, bool fake_pmu,
		   bool warn_if_reordered, bool fake_tp)
{
	struct parse_events_state parse_state = {
		.list	    = LIST_HEAD_INIT(parse_state.list),
		.idx	    = evlist->core.nr_entries,
		.error	    = err,
		.stoken	    = PE_START_EVENTS,
		.fake_pmu   = fake_pmu,
		.fake_tp    = fake_tp,
		.pmu_filter = pmu_filter,
		.match_legacy_cache_terms = true,
	};
	int ret, ret2;

	ret = parse_events__scanner(str, /*input=*/NULL, &parse_state);

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
	if (ret2 < 0)
		return ret;

	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus)
		pr_warning("WARNING: events were regrouped to match PMUs\n");

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are two users, the builtin-record and builtin-test objects.
	 * Both call evlist__delete() in case of error, so we don't need to
	 * bother here.
	 */
	return ret;
}

int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	parse_events_error__exit(&err);
	return ret;
}

struct parse_events_error_entry {
	/** @list: The list the error is part of.
	 */
	struct list_head list;
	/** @idx: index in the parsed string */
	int idx;
	/** @str: string to display at the index */
	char *str;
	/** @help: optional help string */
	char *help;
};

void parse_events_error__init(struct parse_events_error *err)
{
	INIT_LIST_HEAD(&err->list);
}

void parse_events_error__exit(struct parse_events_error *err)
{
	struct parse_events_error_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &err->list, list) {
		zfree(&pos->str);
		zfree(&pos->help);
		list_del_init(&pos->list);
		free(pos);
	}
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	struct parse_events_error_entry *entry;

	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;

	entry = zalloc(sizeof(*entry));
	if (!entry) {
		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
			str, help ?: "<no help>");
		goto out_free;
	}
	entry->idx = idx;
	entry->str = str;
	entry->help = help;
	list_add(&entry->list, &err->list);
	return;
out_free:
	free(str);
	free(help);
}

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;

	if (err_str) {
		/* -2 for extra '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent; we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}

void parse_events_error__print(const struct parse_events_error *err,
			       const char *event)
{
	struct parse_events_error_entry *pos;
	bool first = true;

	list_for_each_entry(pos, &err->list, list) {
		if (!first)
			fputs("\n", stderr);
		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
		first = false;
	}
}

/*
 * In the list of errors err, do any of the error strings (str) contain the
 * given needle string?
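 *
 * A sketch of the intended use, e.g. in tests (the needle shown is
 * illustrative, not a string this file guarantees to emit):
 *
 *	if (parse_events_error__contains(&err, "Bad event name"))
 *		... the failure was about the event name ...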
2315 */ 2316 bool parse_events_error__contains(const struct parse_events_error *err, 2317 const char *needle) 2318 { 2319 struct parse_events_error_entry *pos; 2320 2321 list_for_each_entry(pos, &err->list, list) { 2322 if (strstr(pos->str, needle) != NULL) 2323 return true; 2324 } 2325 return false; 2326 } 2327 2328 #undef MAX_WIDTH 2329 2330 int parse_events_option(const struct option *opt, const char *str, 2331 int unset __maybe_unused) 2332 { 2333 struct parse_events_option_args *args = opt->value; 2334 struct parse_events_error err; 2335 int ret; 2336 2337 parse_events_error__init(&err); 2338 ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err, 2339 /*fake_pmu=*/false, /*warn_if_reordered=*/true, 2340 /*fake_tp=*/false); 2341 2342 if (ret) { 2343 parse_events_error__print(&err, str); 2344 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 2345 } 2346 parse_events_error__exit(&err); 2347 2348 return ret; 2349 } 2350 2351 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset) 2352 { 2353 struct parse_events_option_args *args = opt->value; 2354 int ret; 2355 2356 if (*args->evlistp == NULL) { 2357 *args->evlistp = evlist__new(); 2358 2359 if (*args->evlistp == NULL) { 2360 fprintf(stderr, "Not enough memory to create evlist\n"); 2361 return -1; 2362 } 2363 } 2364 ret = parse_events_option(opt, str, unset); 2365 if (ret) { 2366 evlist__delete(*args->evlistp); 2367 *args->evlistp = NULL; 2368 } 2369 2370 return ret; 2371 } 2372 2373 static int 2374 foreach_evsel_in_last_glob(struct evlist *evlist, 2375 int (*func)(struct evsel *evsel, 2376 const void *arg), 2377 const void *arg) 2378 { 2379 struct evsel *last = NULL; 2380 int err; 2381 2382 /* 2383 * Don't return when list_empty, give func a chance to report 2384 * error when it found last == NULL. 2385 * 2386 * So no need to WARN here, let *func do this. 
2387 */ 2388 if (evlist->core.nr_entries > 0) 2389 last = evlist__last(evlist); 2390 2391 do { 2392 err = (*func)(last, arg); 2393 if (err) 2394 return -1; 2395 if (!last) 2396 return 0; 2397 2398 if (last->core.node.prev == &evlist->core.entries) 2399 return 0; 2400 last = list_entry(last->core.node.prev, struct evsel, core.node); 2401 } while (!last->cmdline_group_boundary); 2402 2403 return 0; 2404 } 2405 2406 static int set_filter(struct evsel *evsel, const void *arg) 2407 { 2408 const char *str = arg; 2409 bool found = false; 2410 int nr_addr_filters = 0; 2411 struct perf_pmu *pmu = NULL; 2412 2413 if (evsel == NULL) { 2414 fprintf(stderr, 2415 "--filter option should follow a -e tracepoint or HW tracer option\n"); 2416 return -1; 2417 } 2418 2419 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 2420 if (evsel__append_tp_filter(evsel, str) < 0) { 2421 fprintf(stderr, 2422 "not enough memory to hold filter string\n"); 2423 return -1; 2424 } 2425 2426 return 0; 2427 } 2428 2429 while ((pmu = perf_pmus__scan(pmu)) != NULL) 2430 if (pmu->type == evsel->core.attr.type) { 2431 found = true; 2432 break; 2433 } 2434 2435 if (found) 2436 perf_pmu__scan_file(pmu, "nr_addr_filters", 2437 "%d", &nr_addr_filters); 2438 2439 if (!nr_addr_filters) 2440 return perf_bpf_filter__parse(&evsel->bpf_filters, str); 2441 2442 if (evsel__append_addr_filter(evsel, str) < 0) { 2443 fprintf(stderr, 2444 "not enough memory to hold filter string\n"); 2445 return -1; 2446 } 2447 2448 return 0; 2449 } 2450 2451 int parse_filter(const struct option *opt, const char *str, 2452 int unset __maybe_unused) 2453 { 2454 struct evlist *evlist = *(struct evlist **)opt->value; 2455 2456 return foreach_evsel_in_last_glob(evlist, set_filter, 2457 (const void *)str); 2458 } 2459 2460 static int add_exclude_perf_filter(struct evsel *evsel, 2461 const void *arg __maybe_unused) 2462 { 2463 char new_filter[64]; 2464 2465 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2466 fprintf(stderr, 2467 "--exclude-perf option should follow a -e tracepoint option\n"); 2468 return -1; 2469 } 2470 2471 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); 2472 2473 if (evsel__append_tp_filter(evsel, new_filter) < 0) { 2474 fprintf(stderr, 2475 "not enough memory to hold filter string\n"); 2476 return -1; 2477 } 2478 2479 return 0; 2480 } 2481 2482 int exclude_perf(const struct option *opt, 2483 const char *arg __maybe_unused, 2484 int unset __maybe_unused) 2485 { 2486 struct evlist *evlist = *(struct evlist **)opt->value; 2487 2488 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, 2489 NULL); 2490 } 2491 2492 int parse_events__is_hardcoded_term(struct parse_events_term *term) 2493 { 2494 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 2495 } 2496 2497 static int new_term(struct parse_events_term **_term, 2498 struct parse_events_term *temp, 2499 char *str, u64 num) 2500 { 2501 struct parse_events_term *term; 2502 2503 term = malloc(sizeof(*term)); 2504 if (!term) 2505 return -ENOMEM; 2506 2507 *term = *temp; 2508 INIT_LIST_HEAD(&term->list); 2509 term->weak = false; 2510 2511 switch (term->type_val) { 2512 case PARSE_EVENTS__TERM_TYPE_NUM: 2513 term->val.num = num; 2514 break; 2515 case PARSE_EVENTS__TERM_TYPE_STR: 2516 term->val.str = str; 2517 break; 2518 default: 2519 free(term); 2520 return -EINVAL; 2521 } 2522 2523 *_term = term; 2524 return 0; 2525 } 2526 2527 int parse_events_term__num(struct parse_events_term **term, 2528 enum parse_events__term_type type_term, 2529 
			   const char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config    = config ? : strdup(parse_events__term_type_str(type_term)),
		.no_value  = no_value,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, /*str=*/NULL, num);
}

int parse_events_term__str(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config    = config,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, /*num=*/0);
}

int parse_events_term__term(struct parse_events_term **term,
			    enum parse_events__term_type term_lhs,
			    enum parse_events__term_type term_rhs,
			    void *loc_term, void *loc_val)
{
	return parse_events_term__str(term, term_lhs, NULL,
				      strdup(parse_events__term_type_str(term_rhs)),
				      loc_term, loc_val);
}

int parse_events_term__clone(struct parse_events_term **new,
			     const struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = *term;

	temp.used = false;
	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, /*str=*/NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str) {
		zfree(&temp.config);
		return -ENOMEM;
	}
	return new_term(new, &temp, str, /*num=*/0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &src->terms, list) {
		struct parse_events_term *n;
		int ret;

		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;

		list_add_tail(&n->list, &dest->terms);
	}
	return 0;
}

void parse_events_terms__init(struct parse_events_terms *terms)
{
	INIT_LIST_HEAD(&terms->terms);
}

void parse_events_terms__exit(struct parse_events_terms *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, &terms->terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct parse_events_terms *terms)
{
	if (!terms)
		return;
	parse_events_terms__exit(terms);
	free(terms);
}

int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
	struct parse_events_term *term;
	bool first = true;

	if (!terms)
		return 0;

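	/*
	 * Render the terms as a comma-separated list: a no-value term prints
	 * as "name", a numeric term as "name=0x<val>" and a string term as
	 * "name=str".
	 */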
	list_for_each_entry(term, &terms->terms, list) {
		int ret;

		if (!first) {
			ret = strbuf_addch(sb, ',');
			if (ret < 0)
				return ret;
		}
		first = false;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->no_value) {
				assert(term->val.num == 1);
				ret = strbuf_addf(sb, "%s", term->config);
			} else {
				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
			}
		} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
			if (term->config) {
				ret = strbuf_addf(sb, "%s=", term->config);
				if (ret < 0)
					return ret;
			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
				ret = strbuf_addf(sb, "%s=",
						  parse_events__term_type_str(term->type_term));
				if (ret < 0)
					return ret;
			}
			assert(!term->no_value);
			ret = strbuf_addf(sb, "%s", term->val.str);
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = parse_events__term_type_str(i);

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string containing the valid config terms of an event.
 * @additional_terms: For terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}
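
/*
 * The resulting message reads, for example (the exact term list depends on
 * config_term_avail() and on any PMU sysfs terms passed in):
 *
 *	valid terms: <pmu sysfs terms>,config,config1,config2,name,...
 */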