// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "term.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/util.h"
#include "tracepoint.h"

#define MAX_NAME_LEN 100

#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest);

const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias = "",
	},
};

const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias = "",
	},
	[PERF_COUNT_SW_BPF_OUTPUT] = {
		.symbol = "bpf-output",
		.alias = "",
	},
	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
		.symbol = "cgroup-switches",
		.alias = "",
	},
};
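/*
 * Illustrative note: the two tables above back the legacy event names on
 * the command line, matching both .symbol and .alias. For example,
 * "perf stat -e cycles" and "perf stat -e cpu-cycles" both resolve to
 * PERF_COUNT_HW_CPU_CYCLES via event_symbols_hw.
 */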
const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";

	case PERF_TYPE_SOFTWARE:
		return "software";

	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";

	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";

	default:
		break;
	}

	return "unknown";
}

static char *get_config_str(const struct parse_events_terms *head_terms,
			    enum parse_events__term_type type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

/**
 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
 *           matches the raw's string value. If the string value matches an
 *           event then change the term to be an event, if not then change it
 *           to be a config term. For example, "read" may be an event of the
 *           PMU or a raw hex encoding of 0xead. The fix-up is done late so the
 *           PMU of the event can be determined and we don't need to scan all
 *           PMUs ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &config_terms->terms, list) {
		u64 num;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
			continue;

		if (perf_pmu__have_event(pmu, term->val.str)) {
			zfree(&term->config);
			term->config = term->val.str;
			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->val.num = 1;
			term->no_value = true;
			continue;
		}

		zfree(&term->config);
		term->config = strdup("config");
		errno = 0;
		num = strtoull(term->val.str + 1, NULL, 16);
		assert(errno == 0);
		free(term->val.str);
		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
		term->val.num = num;
		term->no_value = false;
	}
}
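/*
 * Illustrative example for fix_raw() above: given "cpu/read/", if the cpu
 * PMU exposes a "read" event the term becomes the user term read=1;
 * otherwise "read" is treated as the raw encoding 'r' + "ead" and becomes
 * config=0xead.
 */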
static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, bool auto_merge_stats,
	    const char *cpu_list)
{
	struct evsel *evsel;
	struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
		cpu_list ? perf_cpu_map__new(cpu_list) : NULL;

	if (pmu)
		perf_pmu__warn_invalid_formats(pmu);

	if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) {
		perf_pmu__warn_invalid_config(pmu, attr->config, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
		perf_pmu__warn_invalid_config(pmu, attr->config1, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
		perf_pmu__warn_invalid_config(pmu, attr->config2, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
		perf_pmu__warn_invalid_config(pmu, attr->config3, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
	}
	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel) {
		perf_cpu_map__put(cpus);
		return NULL;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.own_cpus = perf_cpu_map__get(cpus);
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
	evsel->auto_merge_stats = auto_merge_stats;
	evsel->pmu = pmu;
	evsel->pmu_name = pmu ? strdup(pmu->name) : NULL;

	if (name)
		evsel->name = strdup(name);

	if (metric_id)
		evsel->metric_id = strdup(metric_id);

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	return evsel;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
}

static int add_event_tool(struct list_head *list, int *idx,
			  enum perf_tool_event tool_event)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
	};

	evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
			    /*metric_id=*/NULL, /*pmu=*/NULL,
			    /*config_terms=*/NULL, /*auto_merge_stats=*/false,
			    /*cpu_list=*/"0");
	if (!evsel)
		return -ENOMEM;
	evsel->tool_event = tool_event;
	if (tool_event == PERF_TOOL_DURATION_TIME
	    || tool_event == PERF_TOOL_USER_TIME
	    || tool_event == PERF_TOOL_SYSTEM_TIME) {
		free((char *)evsel->unit);
		evsel->unit = strdup("ns");
	}
	return 0;
}

/**
 * parse_aliases - search names for entries beginning or equalling str ignoring
 *                 case. If multiple entries in names match str then the
 *                 longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}
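/*
 * Illustrative example for the decoder above: "L1-dcache-load-misses"
 * resolves to cache_type L1D (0), cache_op READ (0, from "load") and
 * cache_result MISS (1), so *config = 0 | (0 << 8) | (1 << 16) = 0x10000,
 * with the PMU type possibly OR-ed into the extended-type bits.
 */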
/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				bool auto_merge_stats);

int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
			   struct parse_events_state *parse_state,
			   struct parse_events_terms *parsed_terms)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;
	const char *config_name = get_config_name(parsed_terms);
	const char *metric_id = get_config_metric_id(parsed_terms);

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		LIST_HEAD(config_terms);
		struct perf_event_attr attr;
		int ret;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (perf_pmu__have_event(pmu, name)) {
			/*
			 * The PMU has the event so add as not a legacy cache
			 * event.
			 */
			ret = parse_events_add_pmu(parse_state, list, pmu,
						   parsed_terms,
						   perf_pmu__auto_merge_stats(pmu));
			if (ret)
				return ret;
			continue;
		}

		if (!pmu->is_core) {
			/* Legacy cache events are only supported by core PMUs. */
			continue;
		}

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HW_CACHE;

		ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
		if (ret)
			return ret;

		found_supported = true;

		if (parsed_terms) {
			if (config_attr(&attr, parsed_terms, parse_state->error,
					config_term_common))
				return -EINVAL;

			if (get_config_terms(parsed_terms, &config_terms))
				return -ENOMEM;
		}

		if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
				metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
				/*cpu_list=*/NULL) == NULL)
			return -ENOMEM;

		free_config_terms(&config_terms);
	}
	return found_supported ? 0 : -EINVAL;
}
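/*
 * Illustrative note: because parse_events_add_cache() iterates all PMUs, a
 * legacy cache event such as "L1-dcache-load-misses" on a hybrid system
 * yields one evsel per core PMU (e.g. cpu_core and cpu_atom), each with the
 * PMU encoded via the extended type bits.
 */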
#ifdef HAVE_LIBTRACEEVENT
static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name, int column)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get error directly from syscall errno ( > 0),
	 * or from encoded pointer's error ( < 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, column, strdup(str), strdup(help));
}

static int add_tracepoint(struct list_head *list, int *idx,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

static int add_tracepoint_multi_event(struct list_head *list, int *idx,
				      const char *sys_name, const char *evt_name,
				      struct parse_events_error *err,
				      struct parse_events_terms *head_config, YYLTYPE *loc)
{
	char *evt_path;
	struct dirent *evt_ent;
	DIR *evt_dir;
	int ret = 0, found = 0;

	evt_path = get_events_file(sys_name);
	if (!evt_path) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}
	evt_dir = opendir(evt_path);
	if (!evt_dir) {
		put_events_file(evt_path);
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (evt_ent = readdir(evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		found++;

		ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
				     err, head_config, loc);
	}

	if (!found) {
		tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
		ret = -1;
	}

	put_events_file(evt_path);
	closedir(evt_dir);
	return ret;
}
static int add_tracepoint_event(struct list_head *list, int *idx,
				const char *sys_name, const char *evt_name,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, YYLTYPE *loc)
{
	return strpbrk(evt_name, "*?") ?
		add_tracepoint_multi_event(list, idx, sys_name, evt_name,
					   err, head_config, loc) :
		add_tracepoint(list, idx, sys_name, evt_name,
			       err, head_config, loc);
}

static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
				    const char *sys_name, const char *evt_name,
				    struct parse_events_error *err,
				    struct parse_events_terms *head_config, YYLTYPE *loc)
{
	struct dirent *events_ent;
	DIR *events_dir;
	int ret = 0;

	events_dir = tracing_events__opendir();
	if (!events_dir) {
		tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
		return -1;
	}

	while (!ret && (events_ent = readdir(events_dir))) {
		if (!strcmp(events_ent->d_name, ".")
		    || !strcmp(events_ent->d_name, "..")
		    || !strcmp(events_ent->d_name, "enable")
		    || !strcmp(events_ent->d_name, "header_event")
		    || !strcmp(events_ent->d_name, "header_page"))
			continue;

		if (!strglobmatch(events_ent->d_name, sys_name))
			continue;

		ret = add_tracepoint_event(list, idx, events_ent->d_name,
					   evt_name, err, head_config, loc);
	}

	closedir(events_dir);
	return ret;
}
#endif /* HAVE_LIBTRACEEVENT */

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)			\
do {						\
	if (attr->bp_type & bit)		\
		return -EINVAL;			\
	else					\
		attr->bp_type |= bit;		\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}
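/*
 * Illustrative example for parse_breakpoint_type() above: the access string
 * from an event such as "mem:0x1000:rw" sets bp_type to
 * HW_BREAKPOINT_R | HW_BREAKPOINT_W, a repeated flag such as "rr" fails
 * with -EINVAL, and an empty string falls back to the read/write default.
 */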
strdup("expected numeric value") 760 : strdup("expected string value"), 761 NULL); 762 } 763 return -EINVAL; 764 } 765 766 static bool config_term_shrinked; 767 768 static const char *config_term_name(enum parse_events__term_type term_type) 769 { 770 /* 771 * Update according to parse-events.l 772 */ 773 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = { 774 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>", 775 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config", 776 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1", 777 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2", 778 [PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3", 779 [PARSE_EVENTS__TERM_TYPE_NAME] = "name", 780 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period", 781 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq", 782 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type", 783 [PARSE_EVENTS__TERM_TYPE_TIME] = "time", 784 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph", 785 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size", 786 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit", 787 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit", 788 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack", 789 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr", 790 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite", 791 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite", 792 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config", 793 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore", 794 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output", 795 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size", 796 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id", 797 [PARSE_EVENTS__TERM_TYPE_RAW] = "raw", 798 [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache", 799 [PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware", 800 }; 801 if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR) 802 return "unknown term"; 803 804 return config_term_names[term_type]; 805 } 806 807 static bool 808 config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err) 809 { 810 char *err_str; 811 812 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) { 813 parse_events_error__handle(err, -1, 814 strdup("Invalid term_type"), NULL); 815 return false; 816 } 817 if (!config_term_shrinked) 818 return true; 819 820 switch (term_type) { 821 case PARSE_EVENTS__TERM_TYPE_CONFIG: 822 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 823 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 824 case PARSE_EVENTS__TERM_TYPE_CONFIG3: 825 case PARSE_EVENTS__TERM_TYPE_NAME: 826 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 827 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 828 case PARSE_EVENTS__TERM_TYPE_PERCORE: 829 return true; 830 case PARSE_EVENTS__TERM_TYPE_USER: 831 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 832 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 833 case PARSE_EVENTS__TERM_TYPE_TIME: 834 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 835 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 836 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 837 case PARSE_EVENTS__TERM_TYPE_INHERIT: 838 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 839 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 840 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 841 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 842 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 843 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 844 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 845 case PARSE_EVENTS__TERM_TYPE_RAW: 846 case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE: 847 case PARSE_EVENTS__TERM_TYPE_HARDWARE: 848 default: 849 if (!err) 850 return false; 851 852 /* term_type is validated 
		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     config_term_name(term_type)) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)						\
do {									\
	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
		return -EINVAL;						\
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(err, term->err_val,
						   strdup("invalid branch sample type"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_RAW:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(err, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		parse_events_error__handle(err, term->err_term,
					   strdup(config_term_name(term->type_term)),
					   parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after basic checking so
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If we checked availability at the entry of this function, the user
	 * would see "'<sysfs term>' is not usable in 'perf stat'" whenever an
	 * invalid config term is provided for a legacy event (for example,
	 * instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, err))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}
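/*
 * Illustrative example for config_term_common() above: for
 * "cpu/config=0x1a,period=1000/" the config term writes 0x1a into
 * attr->config directly, while the period term is only type-checked here
 * and later becomes an evsel_config_term via get_config_terms().
 */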
static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_error *err)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * Rewrite the PMU event to a legacy cache one unless the PMU
		 * doesn't support legacy cache events or the event is present
		 * within the PMU.
		 */
		if (perf_pmu__supports_legacy_cache(pmu) &&
		    !perf_pmu__have_event(pmu, term->config)) {
			attr->type = PERF_TYPE_HW_CACHE;
			return parse_events__decode_legacy_cache(term->config, pmu->type,
								 &attr->config);
		} else {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
		}
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
		struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		/*
		 * If the PMU has a sysfs or json event prefer it over
		 * legacy. ARM requires this.
		 */
		if (perf_pmu__have_event(pmu, term->config)) {
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->no_value = true;
		} else {
			attr->type = PERF_TYPE_HARDWARE;
			attr->config = term->val.num;
			if (perf_pmus__supports_extended_type())
				attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
		}
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, err);
}

#ifdef HAVE_LIBTRACEEVENT
static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
	case PARSE_EVENTS__TERM_TYPE_HARDWARE:
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
						   strdup(config_term_name(term->type_term)),
						   strdup("valid terms: call-graph,stack-size\n"));
		}
		return -EINVAL;
	}

	return 0;
}
#endif

static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &head->terms, list)
		if (config_term(attr, term, err))
			return -EINVAL;

	return 0;
}

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_USER:
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		default:
			break;
		}
	}
	return 0;
}
/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
		case PARSE_EVENTS__TERM_TYPE_HARDWARE:
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct list_head *list, int *idx,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
#ifdef HAVE_LIBTRACEEVENT
	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(list, idx, sys, event,
						err, head_config, loc);
	else
		return add_tracepoint_event(list, idx, sys, event,
					    err, head_config, loc);
#else
	(void)list;
	(void)idx;
	(void)sys;
	(void)event;
	(void)head_config;
	parse_events_error__handle(err, loc->first_column, strdup("unsupported tracepoint"),
				   strdup("libtraceevent is necessary for tracepoint support"));
	return -1;
#endif
}

static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				      struct list_head *list,
				      struct perf_pmu *pmu, u32 type, u32 extended_type,
				      u64 config, const struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
parse_state->error, 1347 config_term_common)) 1348 return -EINVAL; 1349 1350 if (get_config_terms(head_config, &config_terms)) 1351 return -ENOMEM; 1352 } 1353 1354 name = get_config_name(head_config); 1355 metric_id = get_config_metric_id(head_config); 1356 ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name, 1357 metric_id, pmu, &config_terms, /*auto_merge_stats=*/false, 1358 /*cpu_list=*/NULL) ? 0 : -ENOMEM; 1359 free_config_terms(&config_terms); 1360 return ret; 1361 } 1362 1363 int parse_events_add_numeric(struct parse_events_state *parse_state, 1364 struct list_head *list, 1365 u32 type, u64 config, 1366 const struct parse_events_terms *head_config, 1367 bool wildcard) 1368 { 1369 struct perf_pmu *pmu = NULL; 1370 bool found_supported = false; 1371 1372 /* Wildcards on numeric values are only supported by core PMUs. */ 1373 if (wildcard && perf_pmus__supports_extended_type()) { 1374 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 1375 int ret; 1376 1377 found_supported = true; 1378 if (parse_events__filter_pmu(parse_state, pmu)) 1379 continue; 1380 1381 ret = __parse_events_add_numeric(parse_state, list, pmu, 1382 type, pmu->type, 1383 config, head_config); 1384 if (ret) 1385 return ret; 1386 } 1387 if (found_supported) 1388 return 0; 1389 } 1390 return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type), 1391 type, /*extended_type=*/0, config, head_config); 1392 } 1393 1394 int parse_events_add_tool(struct parse_events_state *parse_state, 1395 struct list_head *list, 1396 int tool_event) 1397 { 1398 return add_event_tool(list, &parse_state->idx, tool_event); 1399 } 1400 1401 static bool config_term_percore(struct list_head *config_terms) 1402 { 1403 struct evsel_config_term *term; 1404 1405 list_for_each_entry(term, config_terms, list) { 1406 if (term->type == EVSEL__CONFIG_TERM_PERCORE) 1407 return term->val.percore; 1408 } 1409 1410 return false; 1411 } 1412 1413 static int parse_events_add_pmu(struct parse_events_state *parse_state, 1414 struct list_head *list, struct perf_pmu *pmu, 1415 const struct parse_events_terms *const_parsed_terms, 1416 bool auto_merge_stats) 1417 { 1418 struct perf_event_attr attr; 1419 struct perf_pmu_info info; 1420 struct evsel *evsel; 1421 struct parse_events_error *err = parse_state->error; 1422 LIST_HEAD(config_terms); 1423 struct parse_events_terms parsed_terms; 1424 bool alias_rewrote_terms = false; 1425 1426 if (verbose > 1) { 1427 struct strbuf sb; 1428 1429 strbuf_init(&sb, /*hint=*/ 0); 1430 if (pmu->selectable && const_parsed_terms && 1431 list_empty(&const_parsed_terms->terms)) { 1432 strbuf_addf(&sb, "%s//", pmu->name); 1433 } else { 1434 strbuf_addf(&sb, "%s/", pmu->name); 1435 parse_events_terms__to_strbuf(const_parsed_terms, &sb); 1436 strbuf_addch(&sb, '/'); 1437 } 1438 fprintf(stderr, "Attempt to add: %s\n", sb.buf); 1439 strbuf_release(&sb); 1440 } 1441 1442 memset(&attr, 0, sizeof(attr)); 1443 if (pmu->perf_event_attr_init_default) 1444 pmu->perf_event_attr_init_default(pmu, &attr); 1445 1446 attr.type = pmu->type; 1447 1448 if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) { 1449 evsel = __add_event(list, &parse_state->idx, &attr, 1450 /*init_attr=*/true, /*name=*/NULL, 1451 /*metric_id=*/NULL, pmu, 1452 /*config_terms=*/NULL, auto_merge_stats, 1453 /*cpu_list=*/NULL); 1454 return evsel ? 
int parse_events_add_tool(struct parse_events_state *parse_state,
			  struct list_head *list,
			  int tool_event)
{
	return add_event_tool(list, &parse_state->idx, tool_event);
}

static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				bool auto_merge_stats)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);
	struct parse_events_terms parsed_terms;
	bool alias_rewrote_terms = false;

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		if (pmu->selectable && const_parsed_terms &&
		    list_empty(&const_parsed_terms->terms)) {
			strbuf_addf(&sb, "%s//", pmu->name);
		} else {
			strbuf_addf(&sb, "%s/", pmu->name);
			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
			strbuf_addch(&sb, '/');
		}
		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
		strbuf_release(&sb);
	}

	memset(&attr, 0, sizeof(attr));
	if (pmu->perf_event_attr_init_default)
		pmu->perf_event_attr_init_default(pmu, &attr);

	attr.type = pmu->type;

	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, auto_merge_stats,
				    /*cpu_list=*/NULL);
		return evsel ? 0 : -ENOMEM;
	}

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}
	fix_raw(&parsed_terms, pmu);

	/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
	if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	/* Look for event names in the terms and rewrite into format based terms. */
	if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, &parsed_terms,
							    &info, &alias_rewrote_terms, err)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		parse_events_terms__to_strbuf(&parsed_terms, &sb);
		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
		strbuf_release(&sb);
	}

	/* Configure attr/terms again if an alias was expanded. */
	if (alias_rewrote_terms &&
	    config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (get_config_terms(&parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->perf_event_attr_init_default &&
	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	if (!parse_state->fake_pmu &&
	    perf_pmu__config(pmu, &attr, &parsed_terms, parse_state->error)) {
		free_config_terms(&config_terms);
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(&parsed_terms),
			    get_config_metric_id(&parsed_terms), pmu,
			    &config_terms, auto_merge_stats, /*cpu_list=*/NULL);
	if (!evsel) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->percore = config_term_percore(&evsel->config_terms);

	if (parse_state->fake_pmu) {
		parse_events_terms__exit(&parsed_terms);
		return 0;
	}

	parse_events_terms__exit(&parsed_terms);
	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	return 0;
}
(parse_events_term__num(&term, 1573 PARSE_EVENTS__TERM_TYPE_USER, 1574 tmp, /*num=*/1, /*novalue=*/true, 1575 loc, /*loc_val=*/NULL) < 0) { 1576 zfree(&tmp); 1577 goto out_err; 1578 } 1579 list_add_tail(&term->list, &parsed_terms.terms); 1580 1581 /* Add it for all PMUs that support the alias */ 1582 list = malloc(sizeof(struct list_head)); 1583 if (!list) 1584 goto out_err; 1585 1586 INIT_LIST_HEAD(list); 1587 1588 while ((pmu = perf_pmus__scan(pmu)) != NULL) { 1589 bool auto_merge_stats; 1590 1591 if (parse_events__filter_pmu(parse_state, pmu)) 1592 continue; 1593 1594 if (!perf_pmu__have_event(pmu, event_name)) 1595 continue; 1596 1597 auto_merge_stats = perf_pmu__auto_merge_stats(pmu); 1598 if (!parse_events_add_pmu(parse_state, list, pmu, 1599 &parsed_terms, auto_merge_stats)) { 1600 struct strbuf sb; 1601 1602 strbuf_init(&sb, /*hint=*/ 0); 1603 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1604 pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf); 1605 strbuf_release(&sb); 1606 ok++; 1607 if (pmu->is_core) 1608 core_ok++; 1609 } 1610 } 1611 1612 if (parse_state->fake_pmu) { 1613 if (!parse_events_add_pmu(parse_state, list, parse_state->fake_pmu, &parsed_terms, 1614 /*auto_merge_stats=*/true)) { 1615 struct strbuf sb; 1616 1617 strbuf_init(&sb, /*hint=*/ 0); 1618 parse_events_terms__to_strbuf(&parsed_terms, &sb); 1619 pr_debug("%s -> %s/%s/\n", event_name, "fake_pmu", sb.buf); 1620 strbuf_release(&sb); 1621 ok++; 1622 } 1623 } 1624 1625 if (hw_config != PERF_COUNT_HW_MAX && !core_ok) { 1626 /* 1627 * The event wasn't found on core PMUs but it has a hardware 1628 * config version to try. 1629 */ 1630 if (!parse_events_add_numeric(parse_state, list, 1631 PERF_TYPE_HARDWARE, hw_config, 1632 const_parsed_terms, 1633 /*wildcard=*/true)) 1634 ok++; 1635 } 1636 1637 out_err: 1638 parse_events_terms__exit(&parsed_terms); 1639 if (ok) 1640 *listp = list; 1641 else 1642 free(list); 1643 1644 return ok ? 0 : -1; 1645 } 1646 1647 int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state, 1648 const char *event_or_pmu, 1649 const struct parse_events_terms *const_parsed_terms, 1650 struct list_head **listp, 1651 void *loc_) 1652 { 1653 YYLTYPE *loc = loc_; 1654 struct perf_pmu *pmu; 1655 int ok = 0; 1656 char *help; 1657 1658 *listp = malloc(sizeof(**listp)); 1659 if (!*listp) 1660 return -ENOMEM; 1661 1662 INIT_LIST_HEAD(*listp); 1663 1664 /* Attempt to add to list assuming event_or_pmu is a PMU name. */ 1665 pmu = parse_state->fake_pmu ?: perf_pmus__find(event_or_pmu); 1666 if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms, 1667 /*auto_merge_stats=*/false)) 1668 return 0; 1669 1670 pmu = NULL; 1671 /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */ 1672 while ((pmu = perf_pmus__scan(pmu)) != NULL) { 1673 if (!parse_events__filter_pmu(parse_state, pmu) && 1674 perf_pmu__match(pmu, event_or_pmu)) { 1675 bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu); 1676 1677 if (!parse_events_add_pmu(parse_state, *listp, pmu, 1678 const_parsed_terms, 1679 auto_merge_stats)) { 1680 ok++; 1681 parse_state->wild_card_pmus = true; 1682 } 1683 } 1684 } 1685 if (ok) 1686 return 0; 1687 1688 /* Failure to add, assume event_or_pmu is an event name. 
int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
					  const char *event_or_pmu,
					  const struct parse_events_terms *const_parsed_terms,
					  struct list_head **listp,
					  void *loc_)
{
	YYLTYPE *loc = loc_;
	struct perf_pmu *pmu;
	int ok = 0;
	char *help;

	*listp = malloc(sizeof(**listp));
	if (!*listp)
		return -ENOMEM;

	INIT_LIST_HEAD(*listp);

	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
	pmu = parse_state->fake_pmu ?: perf_pmus__find(event_or_pmu);
	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
					 /*auto_merge_stats=*/false))
		return 0;

	pmu = NULL;
	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!parse_events__filter_pmu(parse_state, pmu) &&
		    perf_pmu__match(pmu, event_or_pmu)) {
			bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);

			if (!parse_events_add_pmu(parse_state, *listp, pmu,
						  const_parsed_terms,
						  auto_merge_stats)) {
				ok++;
				parse_state->wild_card_pmus = true;
			}
		}
	}
	if (ok)
		return 0;

	/* Failure to add, assume event_or_pmu is an event name. */
	zfree(listp);
	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
					const_parsed_terms, listp, loc))
		return 0;

	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
		help = NULL;
	parse_events_error__handle(parse_state->error, loc->first_column,
				   strdup("Bad event or PMU"),
				   help);
	zfree(listp);
	return -EINVAL;
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	leader = list_first_entry(list, struct evsel, core.node);
	__perf_evlist__set_leader(list, &leader->core);
	zfree(&leader->group_name);
	leader->group_name = name;
}
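/*
 * Illustrative example for the modifier translation below: for "cycles:u"
 * the user modifier first sets exclude = eu = ek = eh = 1 and then clears
 * eu, so the resulting evsel has exclude_kernel and exclude_hv set while
 * user space is still counted.
 */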
static int parse_events__modifier_list(struct parse_events_state *parse_state,
				       YYLTYPE *loc,
				       struct list_head *list,
				       struct parse_events_modifier mod,
				       bool group)
{
	struct evsel *evsel;

	if (!group && mod.weak) {
		parse_events_error__handle(parse_state->error, loc->first_column,
					   strdup("Weak modifier is for use with groups"), NULL);
		return -EINVAL;
	}

	__evlist__for_each_entry(list, evsel) {
		/* Translate modifiers into the equivalent evsel excludes. */
		int eu = group ? evsel->core.attr.exclude_user : 0;
		int ek = group ? evsel->core.attr.exclude_kernel : 0;
		int eh = group ? evsel->core.attr.exclude_hv : 0;
		int eH = group ? evsel->core.attr.exclude_host : 0;
		int eG = group ? evsel->core.attr.exclude_guest : 0;
		int exclude = eu | ek | eh;
		int exclude_GH = group ? evsel->exclude_GH : 0;

		if (mod.precise) {
			/* use of precise requires exclude_guest */
			eG = 1;
		}
		if (mod.user) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			if (!exclude_GH && !perf_guest)
				eG = 1;
			eu = 0;
		}
		if (mod.kernel) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		}
		if (mod.hypervisor) {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		}
		if (mod.guest) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		}
		if (mod.host) {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		}
		evsel->core.attr.exclude_user = eu;
		evsel->core.attr.exclude_kernel = ek;
		evsel->core.attr.exclude_hv = eh;
		evsel->core.attr.exclude_host = eH;
		evsel->core.attr.exclude_guest = eG;
		evsel->exclude_GH = exclude_GH;

		/* Simple modifiers copied to the evsel. */
		if (mod.precise) {
			u8 precise = evsel->core.attr.precise_ip + mod.precise;
			/*
			 * precise ip:
			 *
			 *  0 - SAMPLE_IP can have arbitrary skid
			 *  1 - SAMPLE_IP must have constant skid
			 *  2 - SAMPLE_IP requested to have 0 skid
			 *  3 - SAMPLE_IP must have 0 skid
			 *
			 * See also PERF_RECORD_MISC_EXACT_IP
			 */
			if (precise > 3) {
				char *help;

				if (asprintf(&help,
					     "Maximum combined precise value is 3, adding precision to \"%s\"",
					     evsel__name(evsel)) > 0) {
					parse_events_error__handle(parse_state->error,
								   loc->first_column,
								   help, NULL);
				}
				return -EINVAL;
			}
			evsel->core.attr.precise_ip = precise;
		}
		if (mod.precise_max)
			evsel->precise_max = 1;
		if (mod.non_idle)
			evsel->core.attr.exclude_idle = 1;
		if (mod.sample_read)
			evsel->sample_read = 1;
		if (mod.pinned && evsel__is_group_leader(evsel))
			evsel->core.attr.pinned = 1;
		if (mod.exclusive && evsel__is_group_leader(evsel))
			evsel->core.attr.exclusive = 1;
		if (mod.weak)
			evsel->weak_group = true;
		if (mod.bpf)
			evsel->bpf_counter = true;
	}
	return 0;
}

int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}

int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}

int parse_events__set_default_name(struct list_head *list, char *name)
{
	struct evsel *evsel;
	bool used_name = false;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name) {
			evsel->name = used_name ? strdup(name) : name;
			used_name = true;
			if (!evsel->name)
				return -ENOMEM;
		}
	}
	if (!used_name)
		free(name);
	return 0;
}

static int parse_events__scanner(const char *str,
				 FILE *input,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	if (str)
		buffer = parse_events__scan_string(str, scanner);
	else
		parse_events_set_in(input, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	if (str) {
		parse_events__flush_buffer(buffer, scanner);
		parse_events__delete_buffer(buffer, scanner);
	}
	parse_events_lex_destroy(scanner);
	return ret;
}
/*
 * parse event config string, return a list of event terms.
 */
int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
{
	struct parse_events_state parse_state = {
		.terms = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, input, &parse_state);
	if (!ret)
		list_splice(&parse_state.terms->terms, &terms->terms);

	zfree(&parse_state.terms);
	return ret;
}

static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used as
		 * the list isn't yet sorted to put evsels in the same group
		 * together.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Assign the actual name taking care that the fake PMU lacks a name. */
	evsel->group_pmu_name = strdup(group_pmu_name ?: "fake");
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}

__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}

static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *force_grouped_idx = _fg_idx;
	int lhs_sort_idx, rhs_sort_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;
	bool lhs_has_group, rhs_has_group;

	/*
	 * First sort by grouping/leader. Read the leader idx only if the evsel
static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used
		 * as the list isn't yet sorted to put evsels in the same
		 * group together.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Assign the actual name, taking care that the fake PMU lacks a name. */
	evsel->group_pmu_name = strdup(group_pmu_name ?: "fake");
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}

__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}

static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *force_grouped_idx = _fg_idx;
	int lhs_sort_idx, rhs_sort_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;
	bool lhs_has_group, rhs_has_group;

	/*
	 * First sort by grouping/leader. Read the leader idx only if the evsel
	 * is part of a group; by default ungrouped events are sorted relative
	 * to grouped events based on where the first ungrouped event occurs.
	 * If neither event has a group, fall through to the arch specific
	 * sorting, which can reorder and fix things like Intel's topdown
	 * events.
	 */
	if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
		lhs_has_group = true;
		lhs_sort_idx = lhs_core->leader->idx;
	} else {
		lhs_has_group = false;
		lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
			? *force_grouped_idx
			: lhs_core->idx;
	}
	if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
		rhs_has_group = true;
		rhs_sort_idx = rhs_core->leader->idx;
	} else {
		rhs_has_group = false;
		rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
			? *force_grouped_idx
			: rhs_core->idx;
	}

	if (lhs_sort_idx != rhs_sort_idx)
		return lhs_sort_idx - rhs_sort_idx;

	/* Group by PMU if there is a group. Groups can't span PMUs. */
	if (lhs_has_group && rhs_has_group) {
		lhs_pmu_name = lhs->group_pmu_name;
		rhs_pmu_name = rhs->group_pmu_name;
		ret = strcmp(lhs_pmu_name, rhs_pmu_name);
		if (ret)
			return ret;
	}

	/* Architecture specific sorting. */
	return arch_evlist__cmp(lhs, rhs);
}
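/*
 * Worked example of the comparator's effect (a sketch, assuming the
 * command line "cycles,{instructions,branches}"): the ungrouped "cycles"
 * keeps its own sort index 0, while both group members sort on their
 * leader's index, so list_sort() keeps the group contiguous:
 *
 *	cycles			// idx 0, ungrouped
 *	{instructions,branches}	// idx 1 and 2, both sort on leader idx 1
 */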
static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, force_grouped_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false, cur_leader_force_grouped = false;
	int orig_num_leaders = 0, num_leaders = 0;
	int ret;

	/*
	 * Compute index to insert ungrouped events at. Place them where the
	 * first ungrouped event appears.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		ret = evsel__compute_group_pmu_name(pos, list);
		if (ret)
			return ret;

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect
		 * when the user order is modified.
		 */
		pos->core.idx = idx++;

		/* Remember an index to which all force-grouped events are sorted. */
		if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
		    arch_evsel__must_be_in_group(pos))
			force_grouped_idx = pos->core.idx;
	}

	/* Sort events. */
	list_sort(&force_grouped_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = pos->group_pmu_name;
		const char *cur_leader_pmu_name;
		bool pos_force_grouped = force_grouped_idx != -1 &&
					 arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader, respecting the given groupings and
		 * that groups can't span PMUs.
		 */
		if (!cur_leader)
			cur_leader = pos;

		cur_leader_pmu_name = cur_leader->group_pmu_name;
		if ((cur_leaders_grp != pos->core.leader &&
		     (!pos_force_grouped || !cur_leader_force_grouped)) ||
		    strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* Event is for a different group/PMU than last. */
			cur_leader = pos;
			/*
			 * Remember the leader's group before it is overwritten,
			 * so that later events match as being in the same
			 * group.
			 */
			cur_leaders_grp = pos->core.leader;
			/*
			 * Avoid forcing events into groups with events that
			 * don't need to be in the group.
			 */
			cur_leader_force_grouped = pos_force_grouped;
		}
		if (pos_leader != cur_leader) {
			/* The leader changed, so update it. */
			evsel__set_leader(pos, cur_leader);
		}
	}
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}
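/*
 * Sketch of the regrouping effect (illustrative event names; the uncore
 * PMUs available vary by system): because groups can't span PMUs, a
 * parsed group such as
 *
 *	{cycles,uncore_imc/cas_count_read/}
 *
 * is split so "cycles" stays on the core PMU while the uncore event
 * becomes its own leader. The function then returns 1 and __parse_events()
 * below may print "WARNING: events were regrouped to match PMUs".
 */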
int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
		   struct parse_events_error *err, struct perf_pmu *fake_pmu,
		   bool warn_if_reordered)
{
	struct parse_events_state parse_state = {
		.list	    = LIST_HEAD_INIT(parse_state.list),
		.idx	    = evlist->core.nr_entries,
		.error	    = err,
		.stoken	    = PE_START_EVENTS,
		.fake_pmu   = fake_pmu,
		.pmu_filter = pmu_filter,
		.match_legacy_cache_terms = true,
	};
	int ret, ret2;

	ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
	if (ret2 < 0)
		return ret;

	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus)
		pr_warning("WARNING: events were regrouped to match PMUs\n");

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are two users - the builtin-record and builtin-test objects.
	 * Both call evlist__delete in case of error, so we don't need to
	 * bother here.
	 */
	return ret;
}

int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	parse_events_error__exit(&err);
	return ret;
}

struct parse_events_error_entry {
	/** @list: The list the error is part of. */
	struct list_head list;
	/** @idx: index in the parsed string */
	int idx;
	/** @str: string to display at the index */
	char *str;
	/** @help: optional help string */
	char *help;
};

void parse_events_error__init(struct parse_events_error *err)
{
	INIT_LIST_HEAD(&err->list);
}

void parse_events_error__exit(struct parse_events_error *err)
{
	struct parse_events_error_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &err->list, list) {
		zfree(&pos->str);
		zfree(&pos->help);
		list_del_init(&pos->list);
		free(pos);
	}
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	struct parse_events_error_entry *entry;

	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;

	entry = zalloc(sizeof(*entry));
	if (!entry) {
		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
			str, help ?: "<no help>");
		goto out_free;
	}
	entry->idx = idx;
	entry->str = str;
	entry->help = help;
	list_add(&entry->list, &err->list);
	return;
out_free:
	free(str);
	free(help);
}
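/*
 * Ownership sketch for parse_events_error__handle() (illustrative): the
 * function takes ownership of both strings, freeing them itself when it
 * cannot record the entry, so callers pass heap copies and do not free
 * them afterwards:
 *
 *	char *help;
 *
 *	if (asprintf(&help, "some hint about '%s'", name) < 0)
 *		help = NULL;
 *	parse_events_error__handle(err, column, strdup("bad event"), help);
 */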
#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;

	if (err_str) {
		/* -2 for extra '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent, we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}

void parse_events_error__print(const struct parse_events_error *err,
			       const char *event)
{
	struct parse_events_error_entry *pos;
	bool first = true;

	list_for_each_entry(pos, &err->list, list) {
		if (!first)
			fputs("\n", stderr);
		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
		first = false;
	}
}

/*
 * In the list of errors err, do any of the error strings (str) contain the
 * given needle string?
 */
bool parse_events_error__contains(const struct parse_events_error *err,
				  const char *needle)
{
	struct parse_events_error_entry *pos;

	list_for_each_entry(pos, &err->list, list) {
		if (strstr(pos->str, needle) != NULL)
			return true;
	}
	return false;
}

#undef MAX_WIDTH
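/*
 * Output sketch (illustrative event string and message): for an error
 * recorded at index 7 of "cycles:xyz", the report printed above looks
 * roughly like
 *
 *	event syntax error: 'cycles:xyz'
 *	                            \___ parser error
 *
 * with the marker indented so it lines up under the offending column,
 * and the event string trimmed with ".." when it exceeds the terminal
 * width.
 */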
int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct parse_events_option_args *args = opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
			     /*fake_pmu=*/NULL, /*warn_if_reordered=*/true);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct parse_events_option_args *args = opt->value;
	int ret;

	if (*args->evlistp == NULL) {
		*args->evlistp = evlist__new();

		if (*args->evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}
	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*args->evlistp);
		*args->evlistp = NULL;
	}

	return ret;
}

static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return when the list is empty; give func a chance to report
	 * an error when it finds last == NULL.
	 *
	 * So there is no need to WARN here, let *func do it.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		if (!last)
			return 0;

		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}

static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	bool found = false;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu = NULL;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	while ((pmu = perf_pmus__scan(pmu)) != NULL)
		if (pmu->type == evsel->core.attr.type) {
			found = true;
			break;
		}

	if (found)
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);

	if (!nr_addr_filters)
		return perf_bpf_filter__parse(&evsel->bpf_filters, str);

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}
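/*
 * Sketch of how the filter callbacks are reached (illustrative command
 * line): parse_filter() runs set_filter() for every event in the last
 * "-e" glob, so
 *
 *	perf record -e sched:sched_switch --filter 'prev_pid != 0'
 *
 * appends a tracepoint filter. On a PMU advertising address filters in
 * sysfs (e.g. intel_pt) the string becomes an address filter instead,
 * and otherwise it is handed to perf_bpf_filter__parse().
 */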
static int add_exclude_perf_filter(struct evsel *evsel,
				   const void *arg __maybe_unused)
{
	char new_filter[64];

	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"--exclude-perf option should follow a -e tracepoint option\n");
		return -1;
	}

	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());

	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}

int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

static int new_term(struct parse_events_term **_term,
		    struct parse_events_term *temp,
		    char *str, u64 num)
{
	struct parse_events_term *term;

	term = malloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	*term = *temp;
	INIT_LIST_HEAD(&term->list);
	term->weak = false;

	switch (term->type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}

int parse_events_term__num(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   const char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config    = config ? : strdup(config_term_name(type_term)),
		.no_value  = no_value,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, /*str=*/NULL, num);
}

int parse_events_term__str(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val  = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config    = config,
		.err_term  = loc_term ? loc_term->first_column : 0,
		.err_val   = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, /*num=*/0);
}

int parse_events_term__term(struct parse_events_term **term,
			    enum parse_events__term_type term_lhs,
			    enum parse_events__term_type term_rhs,
			    void *loc_term, void *loc_val)
{
	return parse_events_term__str(term, term_lhs, NULL,
				      strdup(config_term_name(term_rhs)),
				      loc_term, loc_val);
}

int parse_events_term__clone(struct parse_events_term **new,
			     struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = *term;

	temp.used = false;
	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, /*str=*/NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str) {
		zfree(&temp.config);
		return -ENOMEM;
	}
	return new_term(new, &temp, str, /*num=*/0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}
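/*
 * Construction sketch (illustrative; the bison grammar actions normally
 * do this): a "period=1000" term can be built and released by hand, with
 * the location arguments left NULL outside of the parser. A NULL config
 * makes parse_events_term__num() fill in the term's canonical name:
 *
 *	struct parse_events_term *term;
 *
 *	if (!parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
 *				    NULL, 1000, /*no_value=*/false, NULL, NULL))
 *		parse_events_term__delete(term);
 */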
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &src->terms, list) {
		struct parse_events_term *n;
		int ret;

		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;

		list_add_tail(&n->list, &dest->terms);
	}
	return 0;
}

void parse_events_terms__init(struct parse_events_terms *terms)
{
	INIT_LIST_HEAD(&terms->terms);
}

void parse_events_terms__exit(struct parse_events_terms *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, &terms->terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct parse_events_terms *terms)
{
	if (!terms)
		return;
	parse_events_terms__exit(terms);
	free(terms);
}

int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
	struct parse_events_term *term;
	bool first = true;

	if (!terms)
		return 0;

	list_for_each_entry(term, &terms->terms, list) {
		int ret;

		if (!first) {
			ret = strbuf_addch(sb, ',');
			if (ret < 0)
				return ret;
		}
		first = false;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->no_value) {
				assert(term->val.num == 1);
				ret = strbuf_addf(sb, "%s", term->config);
			} else
				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
		} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
			if (term->config) {
				ret = strbuf_addf(sb, "%s=", term->config);
				if (ret < 0)
					return ret;
			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
				ret = strbuf_addf(sb, "%s=", config_term_name(term->type_term));
				if (ret < 0)
					return ret;
			}
			assert(!term->no_value);
			ret = strbuf_addf(sb, "%s", term->val.str);
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = config_term_name(i);

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string containing the valid config terms for an event.
 * @additional_terms: For terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}
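/*
 * Round-trip sketch (illustrative): terms parsed from a string can be
 * serialized back with parse_events_terms__to_strbuf(), which is useful
 * when reconstructing an event string for messages:
 *
 *	struct parse_events_terms terms;
 *	struct strbuf sb = STRBUF_INIT;
 *
 *	parse_events_terms__init(&terms);
 *	if (!parse_events_terms(&terms, "config=0x10,name=foo", NULL) &&
 *	    !parse_events_terms__to_strbuf(&terms, &sb))
 *		pr_debug("%s\n", sb.buf);	// "config=0x10,name=foo"
 *	strbuf_release(&sb);
 *	parse_events_terms__exit(&terms);
 */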