1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/hw_breakpoint.h> 3 #include <linux/err.h> 4 #include <linux/zalloc.h> 5 #include <dirent.h> 6 #include <errno.h> 7 #include <sys/ioctl.h> 8 #include <sys/param.h> 9 #include "term.h" 10 #include "evlist.h" 11 #include "evsel.h" 12 #include <subcmd/parse-options.h> 13 #include "parse-events.h" 14 #include "string2.h" 15 #include "strlist.h" 16 #include "bpf-loader.h" 17 #include "debug.h" 18 #include <api/fs/tracing_path.h> 19 #include <perf/cpumap.h> 20 #include "parse-events-bison.h" 21 #include "parse-events-flex.h" 22 #include "pmu.h" 23 #include "asm/bug.h" 24 #include "util/parse-branch-options.h" 25 #include "util/evsel_config.h" 26 #include "util/event.h" 27 #include "perf.h" 28 #include "util/parse-events-hybrid.h" 29 #include "util/pmu-hybrid.h" 30 #include "tracepoint.h" 31 32 #define MAX_NAME_LEN 100 33 34 struct perf_pmu_event_symbol { 35 char *symbol; 36 enum perf_pmu_event_symbol_type type; 37 }; 38 39 #ifdef PARSER_DEBUG 40 extern int parse_events_debug; 41 #endif 42 int parse_events_parse(void *parse_state, void *scanner); 43 static int get_config_terms(struct list_head *head_config, 44 struct list_head *head_terms __maybe_unused); 45 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state, 46 const char *str, char *pmu_name, 47 struct list_head *list); 48 49 static struct perf_pmu_event_symbol *perf_pmu_events_list; 50 /* 51 * The variable indicates the number of supported pmu event symbols. 52 * 0 means not initialized and ready to init 53 * -1 means failed to init, don't try anymore 54 * >0 is the number of supported pmu event symbols 55 */ 56 static int perf_pmu_events_list_num; 57 58 struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { 59 [PERF_COUNT_HW_CPU_CYCLES] = { 60 .symbol = "cpu-cycles", 61 .alias = "cycles", 62 }, 63 [PERF_COUNT_HW_INSTRUCTIONS] = { 64 .symbol = "instructions", 65 .alias = "", 66 }, 67 [PERF_COUNT_HW_CACHE_REFERENCES] = { 68 .symbol = "cache-references", 69 .alias = "", 70 }, 71 [PERF_COUNT_HW_CACHE_MISSES] = { 72 .symbol = "cache-misses", 73 .alias = "", 74 }, 75 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 76 .symbol = "branch-instructions", 77 .alias = "branches", 78 }, 79 [PERF_COUNT_HW_BRANCH_MISSES] = { 80 .symbol = "branch-misses", 81 .alias = "", 82 }, 83 [PERF_COUNT_HW_BUS_CYCLES] = { 84 .symbol = "bus-cycles", 85 .alias = "", 86 }, 87 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = { 88 .symbol = "stalled-cycles-frontend", 89 .alias = "idle-cycles-frontend", 90 }, 91 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = { 92 .symbol = "stalled-cycles-backend", 93 .alias = "idle-cycles-backend", 94 }, 95 [PERF_COUNT_HW_REF_CPU_CYCLES] = { 96 .symbol = "ref-cycles", 97 .alias = "", 98 }, 99 }; 100 101 struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { 102 [PERF_COUNT_SW_CPU_CLOCK] = { 103 .symbol = "cpu-clock", 104 .alias = "", 105 }, 106 [PERF_COUNT_SW_TASK_CLOCK] = { 107 .symbol = "task-clock", 108 .alias = "", 109 }, 110 [PERF_COUNT_SW_PAGE_FAULTS] = { 111 .symbol = "page-faults", 112 .alias = "faults", 113 }, 114 [PERF_COUNT_SW_CONTEXT_SWITCHES] = { 115 .symbol = "context-switches", 116 .alias = "cs", 117 }, 118 [PERF_COUNT_SW_CPU_MIGRATIONS] = { 119 .symbol = "cpu-migrations", 120 .alias = "migrations", 121 }, 122 [PERF_COUNT_SW_PAGE_FAULTS_MIN] = { 123 .symbol = "minor-faults", 124 .alias = "", 125 }, 126 [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = { 127 .symbol = "major-faults", 128 .alias = "", 129 }, 130 [PERF_COUNT_SW_ALIGNMENT_FAULTS] = { 131 .symbol = "alignment-faults", 132 .alias = "", 133 }, 134 [PERF_COUNT_SW_EMULATION_FAULTS] = { 135 .symbol = "emulation-faults", 136 .alias = "", 137 }, 138 [PERF_COUNT_SW_DUMMY] = { 139 .symbol = "dummy", 140 .alias = "", 141 }, 142 [PERF_COUNT_SW_BPF_OUTPUT] = { 143 .symbol = "bpf-output", 144 .alias = "", 145 }, 146 [PERF_COUNT_SW_CGROUP_SWITCHES] = { 147 .symbol = "cgroup-switches", 148 .alias = "", 149 }, 150 }; 151 152 #define __PERF_EVENT_FIELD(config, name) \ 153 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) 154 155 #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) 156 #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) 157 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) 158 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) 159 160 const char *event_type(int type) 161 { 162 switch (type) { 163 case PERF_TYPE_HARDWARE: 164 return "hardware"; 165 166 case PERF_TYPE_SOFTWARE: 167 return "software"; 168 169 case PERF_TYPE_TRACEPOINT: 170 return "tracepoint"; 171 172 case PERF_TYPE_HW_CACHE: 173 return "hardware-cache"; 174 175 default: 176 break; 177 } 178 179 return "unknown"; 180 } 181 182 static char *get_config_str(struct list_head *head_terms, int type_term) 183 { 184 struct parse_events_term *term; 185 186 if (!head_terms) 187 return NULL; 188 189 list_for_each_entry(term, head_terms, list) 190 if (term->type_term == type_term) 191 return term->val.str; 192 193 return NULL; 194 } 195 196 static char *get_config_metric_id(struct list_head *head_terms) 197 { 198 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID); 199 } 200 201 static char *get_config_name(struct list_head *head_terms) 202 { 203 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME); 204 } 205 206 static struct evsel * 207 __add_event(struct list_head *list, int *idx, 208 struct perf_event_attr *attr, 209 bool init_attr, 210 const char *name, const char *metric_id, struct perf_pmu *pmu, 211 struct list_head *config_terms, bool auto_merge_stats, 212 const char *cpu_list) 213 { 214 struct evsel *evsel; 215 struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) : 216 cpu_list ? perf_cpu_map__new(cpu_list) : NULL; 217 218 if (pmu && attr->type == PERF_TYPE_RAW) 219 perf_pmu__warn_invalid_config(pmu, attr->config, name); 220 221 if (init_attr) 222 event_attr_init(attr); 223 224 evsel = evsel__new_idx(attr, *idx); 225 if (!evsel) { 226 perf_cpu_map__put(cpus); 227 return NULL; 228 } 229 230 (*idx)++; 231 evsel->core.cpus = cpus; 232 evsel->core.own_cpus = perf_cpu_map__get(cpus); 233 evsel->core.requires_cpu = pmu ? pmu->is_uncore : false; 234 evsel->auto_merge_stats = auto_merge_stats; 235 236 if (name) 237 evsel->name = strdup(name); 238 239 if (metric_id) 240 evsel->metric_id = strdup(metric_id); 241 242 if (config_terms) 243 list_splice_init(config_terms, &evsel->config_terms); 244 245 if (list) 246 list_add_tail(&evsel->core.node, list); 247 248 return evsel; 249 } 250 251 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr, 252 const char *name, const char *metric_id, 253 struct perf_pmu *pmu) 254 { 255 return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name, 256 metric_id, pmu, /*config_terms=*/NULL, 257 /*auto_merge_stats=*/false, /*cpu_list=*/NULL); 258 } 259 260 static int add_event(struct list_head *list, int *idx, 261 struct perf_event_attr *attr, const char *name, 262 const char *metric_id, struct list_head *config_terms) 263 { 264 return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id, 265 /*pmu=*/NULL, config_terms, 266 /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM; 267 } 268 269 static int add_event_tool(struct list_head *list, int *idx, 270 enum perf_tool_event tool_event) 271 { 272 struct evsel *evsel; 273 struct perf_event_attr attr = { 274 .type = PERF_TYPE_SOFTWARE, 275 .config = PERF_COUNT_SW_DUMMY, 276 }; 277 278 evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL, 279 /*metric_id=*/NULL, /*pmu=*/NULL, 280 /*config_terms=*/NULL, /*auto_merge_stats=*/false, 281 /*cpu_list=*/"0"); 282 if (!evsel) 283 return -ENOMEM; 284 evsel->tool_event = tool_event; 285 if (tool_event == PERF_TOOL_DURATION_TIME 286 || tool_event == PERF_TOOL_USER_TIME 287 || tool_event == PERF_TOOL_SYSTEM_TIME) { 288 free((char *)evsel->unit); 289 evsel->unit = strdup("ns"); 290 } 291 return 0; 292 } 293 294 static int parse_aliases(char *str, const char *const names[][EVSEL__MAX_ALIASES], int size) 295 { 296 int i, j; 297 int n, longest = -1; 298 299 for (i = 0; i < size; i++) { 300 for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) { 301 n = strlen(names[i][j]); 302 if (n > longest && !strncasecmp(str, names[i][j], n)) 303 longest = n; 304 } 305 if (longest > 0) 306 return i; 307 } 308 309 return -1; 310 } 311 312 typedef int config_term_func_t(struct perf_event_attr *attr, 313 struct parse_events_term *term, 314 struct parse_events_error *err); 315 static int config_term_common(struct perf_event_attr *attr, 316 struct parse_events_term *term, 317 struct parse_events_error *err); 318 static int config_attr(struct perf_event_attr *attr, 319 struct list_head *head, 320 struct parse_events_error *err, 321 config_term_func_t config_term); 322 323 int parse_events_add_cache(struct list_head *list, int *idx, 324 char *type, char *op_result1, char *op_result2, 325 struct parse_events_error *err, 326 struct list_head *head_config, 327 struct parse_events_state *parse_state) 328 { 329 struct perf_event_attr attr; 330 LIST_HEAD(config_terms); 331 char name[MAX_NAME_LEN]; 332 const char *config_name, *metric_id; 333 int cache_type = -1, cache_op = -1, cache_result = -1; 334 char *op_result[2] = { op_result1, op_result2 }; 335 int i, n, ret; 336 bool hybrid; 337 338 /* 339 * No fallback - if we cannot get a clear cache type 340 * then bail out: 341 */ 342 cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX); 343 if (cache_type == -1) 344 return -EINVAL; 345 346 config_name = get_config_name(head_config); 347 n = snprintf(name, MAX_NAME_LEN, "%s", type); 348 349 for (i = 0; (i < 2) && (op_result[i]); i++) { 350 char *str = op_result[i]; 351 352 n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str); 353 354 if (cache_op == -1) { 355 cache_op = parse_aliases(str, evsel__hw_cache_op, 356 PERF_COUNT_HW_CACHE_OP_MAX); 357 if (cache_op >= 0) { 358 if (!evsel__is_cache_op_valid(cache_type, cache_op)) 359 return -EINVAL; 360 continue; 361 } 362 } 363 364 if (cache_result == -1) { 365 cache_result = parse_aliases(str, evsel__hw_cache_result, 366 PERF_COUNT_HW_CACHE_RESULT_MAX); 367 if (cache_result >= 0) 368 continue; 369 } 370 } 371 372 /* 373 * Fall back to reads: 374 */ 375 if (cache_op == -1) 376 cache_op = PERF_COUNT_HW_CACHE_OP_READ; 377 378 /* 379 * Fall back to accesses: 380 */ 381 if (cache_result == -1) 382 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; 383 384 memset(&attr, 0, sizeof(attr)); 385 attr.config = cache_type | (cache_op << 8) | (cache_result << 16); 386 attr.type = PERF_TYPE_HW_CACHE; 387 388 if (head_config) { 389 if (config_attr(&attr, head_config, err, 390 config_term_common)) 391 return -EINVAL; 392 393 if (get_config_terms(head_config, &config_terms)) 394 return -ENOMEM; 395 } 396 397 metric_id = get_config_metric_id(head_config); 398 ret = parse_events__add_cache_hybrid(list, idx, &attr, 399 config_name ? : name, 400 metric_id, 401 &config_terms, 402 &hybrid, parse_state); 403 if (hybrid) 404 goto out_free_terms; 405 406 ret = add_event(list, idx, &attr, config_name ? : name, metric_id, 407 &config_terms); 408 out_free_terms: 409 free_config_terms(&config_terms); 410 return ret; 411 } 412 413 static void tracepoint_error(struct parse_events_error *e, int err, 414 const char *sys, const char *name) 415 { 416 const char *str; 417 char help[BUFSIZ]; 418 419 if (!e) 420 return; 421 422 /* 423 * We get error directly from syscall errno ( > 0), 424 * or from encoded pointer's error ( < 0). 425 */ 426 err = abs(err); 427 428 switch (err) { 429 case EACCES: 430 str = "can't access trace events"; 431 break; 432 case ENOENT: 433 str = "unknown tracepoint"; 434 break; 435 default: 436 str = "failed to add tracepoint"; 437 break; 438 } 439 440 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name); 441 parse_events_error__handle(e, 0, strdup(str), strdup(help)); 442 } 443 444 static int add_tracepoint(struct list_head *list, int *idx, 445 const char *sys_name, const char *evt_name, 446 struct parse_events_error *err, 447 struct list_head *head_config) 448 { 449 struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++); 450 451 if (IS_ERR(evsel)) { 452 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name); 453 return PTR_ERR(evsel); 454 } 455 456 if (head_config) { 457 LIST_HEAD(config_terms); 458 459 if (get_config_terms(head_config, &config_terms)) 460 return -ENOMEM; 461 list_splice(&config_terms, &evsel->config_terms); 462 } 463 464 list_add_tail(&evsel->core.node, list); 465 return 0; 466 } 467 468 static int add_tracepoint_multi_event(struct list_head *list, int *idx, 469 const char *sys_name, const char *evt_name, 470 struct parse_events_error *err, 471 struct list_head *head_config) 472 { 473 char *evt_path; 474 struct dirent *evt_ent; 475 DIR *evt_dir; 476 int ret = 0, found = 0; 477 478 evt_path = get_events_file(sys_name); 479 if (!evt_path) { 480 tracepoint_error(err, errno, sys_name, evt_name); 481 return -1; 482 } 483 evt_dir = opendir(evt_path); 484 if (!evt_dir) { 485 put_events_file(evt_path); 486 tracepoint_error(err, errno, sys_name, evt_name); 487 return -1; 488 } 489 490 while (!ret && (evt_ent = readdir(evt_dir))) { 491 if (!strcmp(evt_ent->d_name, ".") 492 || !strcmp(evt_ent->d_name, "..") 493 || !strcmp(evt_ent->d_name, "enable") 494 || !strcmp(evt_ent->d_name, "filter")) 495 continue; 496 497 if (!strglobmatch(evt_ent->d_name, evt_name)) 498 continue; 499 500 found++; 501 502 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name, 503 err, head_config); 504 } 505 506 if (!found) { 507 tracepoint_error(err, ENOENT, sys_name, evt_name); 508 ret = -1; 509 } 510 511 put_events_file(evt_path); 512 closedir(evt_dir); 513 return ret; 514 } 515 516 static int add_tracepoint_event(struct list_head *list, int *idx, 517 const char *sys_name, const char *evt_name, 518 struct parse_events_error *err, 519 struct list_head *head_config) 520 { 521 return strpbrk(evt_name, "*?") ? 522 add_tracepoint_multi_event(list, idx, sys_name, evt_name, 523 err, head_config) : 524 add_tracepoint(list, idx, sys_name, evt_name, 525 err, head_config); 526 } 527 528 static int add_tracepoint_multi_sys(struct list_head *list, int *idx, 529 const char *sys_name, const char *evt_name, 530 struct parse_events_error *err, 531 struct list_head *head_config) 532 { 533 struct dirent *events_ent; 534 DIR *events_dir; 535 int ret = 0; 536 537 events_dir = tracing_events__opendir(); 538 if (!events_dir) { 539 tracepoint_error(err, errno, sys_name, evt_name); 540 return -1; 541 } 542 543 while (!ret && (events_ent = readdir(events_dir))) { 544 if (!strcmp(events_ent->d_name, ".") 545 || !strcmp(events_ent->d_name, "..") 546 || !strcmp(events_ent->d_name, "enable") 547 || !strcmp(events_ent->d_name, "header_event") 548 || !strcmp(events_ent->d_name, "header_page")) 549 continue; 550 551 if (!strglobmatch(events_ent->d_name, sys_name)) 552 continue; 553 554 ret = add_tracepoint_event(list, idx, events_ent->d_name, 555 evt_name, err, head_config); 556 } 557 558 closedir(events_dir); 559 return ret; 560 } 561 562 #ifdef HAVE_LIBBPF_SUPPORT 563 struct __add_bpf_event_param { 564 struct parse_events_state *parse_state; 565 struct list_head *list; 566 struct list_head *head_config; 567 }; 568 569 static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj, 570 void *_param) 571 { 572 LIST_HEAD(new_evsels); 573 struct __add_bpf_event_param *param = _param; 574 struct parse_events_state *parse_state = param->parse_state; 575 struct list_head *list = param->list; 576 struct evsel *pos; 577 int err; 578 /* 579 * Check if we should add the event, i.e. if it is a TP but starts with a '!', 580 * then don't add the tracepoint, this will be used for something else, like 581 * adding to a BPF_MAP_TYPE_PROG_ARRAY. 582 * 583 * See tools/perf/examples/bpf/augmented_raw_syscalls.c 584 */ 585 if (group[0] == '!') 586 return 0; 587 588 pr_debug("add bpf event %s:%s and attach bpf program %d\n", 589 group, event, fd); 590 591 err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group, 592 event, parse_state->error, 593 param->head_config); 594 if (err) { 595 struct evsel *evsel, *tmp; 596 597 pr_debug("Failed to add BPF event %s:%s\n", 598 group, event); 599 list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) { 600 list_del_init(&evsel->core.node); 601 evsel__delete(evsel); 602 } 603 return err; 604 } 605 pr_debug("adding %s:%s\n", group, event); 606 607 list_for_each_entry(pos, &new_evsels, core.node) { 608 pr_debug("adding %s:%s to %p\n", 609 group, event, pos); 610 pos->bpf_fd = fd; 611 pos->bpf_obj = obj; 612 } 613 list_splice(&new_evsels, list); 614 return 0; 615 } 616 617 int parse_events_load_bpf_obj(struct parse_events_state *parse_state, 618 struct list_head *list, 619 struct bpf_object *obj, 620 struct list_head *head_config) 621 { 622 int err; 623 char errbuf[BUFSIZ]; 624 struct __add_bpf_event_param param = {parse_state, list, head_config}; 625 static bool registered_unprobe_atexit = false; 626 627 if (IS_ERR(obj) || !obj) { 628 snprintf(errbuf, sizeof(errbuf), 629 "Internal error: load bpf obj with NULL"); 630 err = -EINVAL; 631 goto errout; 632 } 633 634 /* 635 * Register atexit handler before calling bpf__probe() so 636 * bpf__probe() don't need to unprobe probe points its already 637 * created when failure. 638 */ 639 if (!registered_unprobe_atexit) { 640 atexit(bpf__clear); 641 registered_unprobe_atexit = true; 642 } 643 644 err = bpf__probe(obj); 645 if (err) { 646 bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf)); 647 goto errout; 648 } 649 650 err = bpf__load(obj); 651 if (err) { 652 bpf__strerror_load(obj, err, errbuf, sizeof(errbuf)); 653 goto errout; 654 } 655 656 err = bpf__foreach_event(obj, add_bpf_event, ¶m); 657 if (err) { 658 snprintf(errbuf, sizeof(errbuf), 659 "Attach events in BPF object failed"); 660 goto errout; 661 } 662 663 return 0; 664 errout: 665 parse_events_error__handle(parse_state->error, 0, 666 strdup(errbuf), strdup("(add -v to see detail)")); 667 return err; 668 } 669 670 static int 671 parse_events_config_bpf(struct parse_events_state *parse_state, 672 struct bpf_object *obj, 673 struct list_head *head_config) 674 { 675 struct parse_events_term *term; 676 int error_pos; 677 678 if (!head_config || list_empty(head_config)) 679 return 0; 680 681 list_for_each_entry(term, head_config, list) { 682 int err; 683 684 if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) { 685 parse_events_error__handle(parse_state->error, term->err_term, 686 strdup("Invalid config term for BPF object"), 687 NULL); 688 return -EINVAL; 689 } 690 691 err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos); 692 if (err) { 693 char errbuf[BUFSIZ]; 694 int idx; 695 696 bpf__strerror_config_obj(obj, term, parse_state->evlist, 697 &error_pos, err, errbuf, 698 sizeof(errbuf)); 699 700 if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE) 701 idx = term->err_val; 702 else 703 idx = term->err_term + error_pos; 704 705 parse_events_error__handle(parse_state->error, idx, 706 strdup(errbuf), 707 strdup( 708 "Hint:\tValid config terms:\n" 709 " \tmap:[<arraymap>].value<indices>=[value]\n" 710 " \tmap:[<eventmap>].event<indices>=[event]\n" 711 "\n" 712 " \twhere <indices> is something like [0,3...5] or [all]\n" 713 " \t(add -v to see detail)")); 714 return err; 715 } 716 } 717 return 0; 718 } 719 720 /* 721 * Split config terms: 722 * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ... 723 * 'call-graph=fp' is 'evt config', should be applied to each 724 * events in bpf.c. 725 * 'map:array.value[0]=1' is 'obj config', should be processed 726 * with parse_events_config_bpf. 727 * 728 * Move object config terms from the first list to obj_head_config. 729 */ 730 static void 731 split_bpf_config_terms(struct list_head *evt_head_config, 732 struct list_head *obj_head_config) 733 { 734 struct parse_events_term *term, *temp; 735 736 /* 737 * Currently, all possible user config term 738 * belong to bpf object. parse_events__is_hardcoded_term() 739 * happens to be a good flag. 740 * 741 * See parse_events_config_bpf() and 742 * config_term_tracepoint(). 743 */ 744 list_for_each_entry_safe(term, temp, evt_head_config, list) 745 if (!parse_events__is_hardcoded_term(term)) 746 list_move_tail(&term->list, obj_head_config); 747 } 748 749 int parse_events_load_bpf(struct parse_events_state *parse_state, 750 struct list_head *list, 751 char *bpf_file_name, 752 bool source, 753 struct list_head *head_config) 754 { 755 int err; 756 struct bpf_object *obj; 757 LIST_HEAD(obj_head_config); 758 759 if (head_config) 760 split_bpf_config_terms(head_config, &obj_head_config); 761 762 obj = bpf__prepare_load(bpf_file_name, source); 763 if (IS_ERR(obj)) { 764 char errbuf[BUFSIZ]; 765 766 err = PTR_ERR(obj); 767 768 if (err == -ENOTSUP) 769 snprintf(errbuf, sizeof(errbuf), 770 "BPF support is not compiled"); 771 else 772 bpf__strerror_prepare_load(bpf_file_name, 773 source, 774 -err, errbuf, 775 sizeof(errbuf)); 776 777 parse_events_error__handle(parse_state->error, 0, 778 strdup(errbuf), strdup("(add -v to see detail)")); 779 return err; 780 } 781 782 err = parse_events_load_bpf_obj(parse_state, list, obj, head_config); 783 if (err) 784 return err; 785 err = parse_events_config_bpf(parse_state, obj, &obj_head_config); 786 787 /* 788 * Caller doesn't know anything about obj_head_config, 789 * so combine them together again before returning. 790 */ 791 if (head_config) 792 list_splice_tail(&obj_head_config, head_config); 793 return err; 794 } 795 #else // HAVE_LIBBPF_SUPPORT 796 int parse_events_load_bpf_obj(struct parse_events_state *parse_state, 797 struct list_head *list __maybe_unused, 798 struct bpf_object *obj __maybe_unused, 799 struct list_head *head_config __maybe_unused) 800 { 801 parse_events_error__handle(parse_state->error, 0, 802 strdup("BPF support is not compiled"), 803 strdup("Make sure libbpf-devel is available at build time.")); 804 return -ENOTSUP; 805 } 806 807 int parse_events_load_bpf(struct parse_events_state *parse_state, 808 struct list_head *list __maybe_unused, 809 char *bpf_file_name __maybe_unused, 810 bool source __maybe_unused, 811 struct list_head *head_config __maybe_unused) 812 { 813 parse_events_error__handle(parse_state->error, 0, 814 strdup("BPF support is not compiled"), 815 strdup("Make sure libbpf-devel is available at build time.")); 816 return -ENOTSUP; 817 } 818 #endif // HAVE_LIBBPF_SUPPORT 819 820 static int 821 parse_breakpoint_type(const char *type, struct perf_event_attr *attr) 822 { 823 int i; 824 825 for (i = 0; i < 3; i++) { 826 if (!type || !type[i]) 827 break; 828 829 #define CHECK_SET_TYPE(bit) \ 830 do { \ 831 if (attr->bp_type & bit) \ 832 return -EINVAL; \ 833 else \ 834 attr->bp_type |= bit; \ 835 } while (0) 836 837 switch (type[i]) { 838 case 'r': 839 CHECK_SET_TYPE(HW_BREAKPOINT_R); 840 break; 841 case 'w': 842 CHECK_SET_TYPE(HW_BREAKPOINT_W); 843 break; 844 case 'x': 845 CHECK_SET_TYPE(HW_BREAKPOINT_X); 846 break; 847 default: 848 return -EINVAL; 849 } 850 } 851 852 #undef CHECK_SET_TYPE 853 854 if (!attr->bp_type) /* Default */ 855 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; 856 857 return 0; 858 } 859 860 int parse_events_add_breakpoint(struct list_head *list, int *idx, 861 u64 addr, char *type, u64 len) 862 { 863 struct perf_event_attr attr; 864 865 memset(&attr, 0, sizeof(attr)); 866 attr.bp_addr = addr; 867 868 if (parse_breakpoint_type(type, &attr)) 869 return -EINVAL; 870 871 /* Provide some defaults if len is not specified */ 872 if (!len) { 873 if (attr.bp_type == HW_BREAKPOINT_X) 874 len = sizeof(long); 875 else 876 len = HW_BREAKPOINT_LEN_4; 877 } 878 879 attr.bp_len = len; 880 881 attr.type = PERF_TYPE_BREAKPOINT; 882 attr.sample_period = 1; 883 884 return add_event(list, idx, &attr, /*name=*/NULL, /*mertic_id=*/NULL, 885 /*config_terms=*/NULL); 886 } 887 888 static int check_type_val(struct parse_events_term *term, 889 struct parse_events_error *err, 890 int type) 891 { 892 if (type == term->type_val) 893 return 0; 894 895 if (err) { 896 parse_events_error__handle(err, term->err_val, 897 type == PARSE_EVENTS__TERM_TYPE_NUM 898 ? strdup("expected numeric value") 899 : strdup("expected string value"), 900 NULL); 901 } 902 return -EINVAL; 903 } 904 905 /* 906 * Update according to parse-events.l 907 */ 908 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = { 909 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>", 910 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config", 911 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1", 912 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2", 913 [PARSE_EVENTS__TERM_TYPE_NAME] = "name", 914 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period", 915 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq", 916 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type", 917 [PARSE_EVENTS__TERM_TYPE_TIME] = "time", 918 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph", 919 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size", 920 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit", 921 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit", 922 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack", 923 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr", 924 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite", 925 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite", 926 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config", 927 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore", 928 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output", 929 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size", 930 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id", 931 }; 932 933 static bool config_term_shrinked; 934 935 static bool 936 config_term_avail(int term_type, struct parse_events_error *err) 937 { 938 char *err_str; 939 940 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) { 941 parse_events_error__handle(err, -1, 942 strdup("Invalid term_type"), NULL); 943 return false; 944 } 945 if (!config_term_shrinked) 946 return true; 947 948 switch (term_type) { 949 case PARSE_EVENTS__TERM_TYPE_CONFIG: 950 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 951 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 952 case PARSE_EVENTS__TERM_TYPE_NAME: 953 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 954 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 955 case PARSE_EVENTS__TERM_TYPE_PERCORE: 956 return true; 957 default: 958 if (!err) 959 return false; 960 961 /* term_type is validated so indexing is safe */ 962 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'", 963 config_term_names[term_type]) >= 0) 964 parse_events_error__handle(err, -1, err_str, NULL); 965 return false; 966 } 967 } 968 969 void parse_events__shrink_config_terms(void) 970 { 971 config_term_shrinked = true; 972 } 973 974 static int config_term_common(struct perf_event_attr *attr, 975 struct parse_events_term *term, 976 struct parse_events_error *err) 977 { 978 #define CHECK_TYPE_VAL(type) \ 979 do { \ 980 if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \ 981 return -EINVAL; \ 982 } while (0) 983 984 switch (term->type_term) { 985 case PARSE_EVENTS__TERM_TYPE_CONFIG: 986 CHECK_TYPE_VAL(NUM); 987 attr->config = term->val.num; 988 break; 989 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 990 CHECK_TYPE_VAL(NUM); 991 attr->config1 = term->val.num; 992 break; 993 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 994 CHECK_TYPE_VAL(NUM); 995 attr->config2 = term->val.num; 996 break; 997 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 998 CHECK_TYPE_VAL(NUM); 999 break; 1000 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1001 CHECK_TYPE_VAL(NUM); 1002 break; 1003 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1004 CHECK_TYPE_VAL(STR); 1005 if (strcmp(term->val.str, "no") && 1006 parse_branch_str(term->val.str, 1007 &attr->branch_sample_type)) { 1008 parse_events_error__handle(err, term->err_val, 1009 strdup("invalid branch sample type"), 1010 NULL); 1011 return -EINVAL; 1012 } 1013 break; 1014 case PARSE_EVENTS__TERM_TYPE_TIME: 1015 CHECK_TYPE_VAL(NUM); 1016 if (term->val.num > 1) { 1017 parse_events_error__handle(err, term->err_val, 1018 strdup("expected 0 or 1"), 1019 NULL); 1020 return -EINVAL; 1021 } 1022 break; 1023 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1024 CHECK_TYPE_VAL(STR); 1025 break; 1026 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1027 CHECK_TYPE_VAL(NUM); 1028 break; 1029 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1030 CHECK_TYPE_VAL(NUM); 1031 break; 1032 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1033 CHECK_TYPE_VAL(NUM); 1034 break; 1035 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1036 CHECK_TYPE_VAL(NUM); 1037 break; 1038 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1039 CHECK_TYPE_VAL(NUM); 1040 break; 1041 case PARSE_EVENTS__TERM_TYPE_NAME: 1042 CHECK_TYPE_VAL(STR); 1043 break; 1044 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1045 CHECK_TYPE_VAL(STR); 1046 break; 1047 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1048 CHECK_TYPE_VAL(NUM); 1049 break; 1050 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1051 CHECK_TYPE_VAL(NUM); 1052 break; 1053 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1054 CHECK_TYPE_VAL(NUM); 1055 if ((unsigned int)term->val.num > 1) { 1056 parse_events_error__handle(err, term->err_val, 1057 strdup("expected 0 or 1"), 1058 NULL); 1059 return -EINVAL; 1060 } 1061 break; 1062 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1063 CHECK_TYPE_VAL(NUM); 1064 break; 1065 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1066 CHECK_TYPE_VAL(NUM); 1067 if (term->val.num > UINT_MAX) { 1068 parse_events_error__handle(err, term->err_val, 1069 strdup("too big"), 1070 NULL); 1071 return -EINVAL; 1072 } 1073 break; 1074 default: 1075 parse_events_error__handle(err, term->err_term, 1076 strdup("unknown term"), 1077 parse_events_formats_error_string(NULL)); 1078 return -EINVAL; 1079 } 1080 1081 /* 1082 * Check term availability after basic checking so 1083 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered. 1084 * 1085 * If check availability at the entry of this function, 1086 * user will see "'<sysfs term>' is not usable in 'perf stat'" 1087 * if an invalid config term is provided for legacy events 1088 * (for example, instructions/badterm/...), which is confusing. 1089 */ 1090 if (!config_term_avail(term->type_term, err)) 1091 return -EINVAL; 1092 return 0; 1093 #undef CHECK_TYPE_VAL 1094 } 1095 1096 static int config_term_pmu(struct perf_event_attr *attr, 1097 struct parse_events_term *term, 1098 struct parse_events_error *err) 1099 { 1100 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER || 1101 term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) 1102 /* 1103 * Always succeed for sysfs terms, as we dont know 1104 * at this point what type they need to have. 1105 */ 1106 return 0; 1107 else 1108 return config_term_common(attr, term, err); 1109 } 1110 1111 static int config_term_tracepoint(struct perf_event_attr *attr, 1112 struct parse_events_term *term, 1113 struct parse_events_error *err) 1114 { 1115 switch (term->type_term) { 1116 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1117 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1118 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1119 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1120 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1121 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1122 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1123 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1124 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1125 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1126 return config_term_common(attr, term, err); 1127 default: 1128 if (err) { 1129 parse_events_error__handle(err, term->err_term, 1130 strdup("unknown term"), 1131 strdup("valid terms: call-graph,stack-size\n")); 1132 } 1133 return -EINVAL; 1134 } 1135 1136 return 0; 1137 } 1138 1139 static int config_attr(struct perf_event_attr *attr, 1140 struct list_head *head, 1141 struct parse_events_error *err, 1142 config_term_func_t config_term) 1143 { 1144 struct parse_events_term *term; 1145 1146 list_for_each_entry(term, head, list) 1147 if (config_term(attr, term, err)) 1148 return -EINVAL; 1149 1150 return 0; 1151 } 1152 1153 static int get_config_terms(struct list_head *head_config, 1154 struct list_head *head_terms __maybe_unused) 1155 { 1156 #define ADD_CONFIG_TERM(__type, __weak) \ 1157 struct evsel_config_term *__t; \ 1158 \ 1159 __t = zalloc(sizeof(*__t)); \ 1160 if (!__t) \ 1161 return -ENOMEM; \ 1162 \ 1163 INIT_LIST_HEAD(&__t->list); \ 1164 __t->type = EVSEL__CONFIG_TERM_ ## __type; \ 1165 __t->weak = __weak; \ 1166 list_add_tail(&__t->list, head_terms) 1167 1168 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \ 1169 do { \ 1170 ADD_CONFIG_TERM(__type, __weak); \ 1171 __t->val.__name = __val; \ 1172 } while (0) 1173 1174 #define ADD_CONFIG_TERM_STR(__type, __val, __weak) \ 1175 do { \ 1176 ADD_CONFIG_TERM(__type, __weak); \ 1177 __t->val.str = strdup(__val); \ 1178 if (!__t->val.str) { \ 1179 zfree(&__t); \ 1180 return -ENOMEM; \ 1181 } \ 1182 __t->free_str = true; \ 1183 } while (0) 1184 1185 struct parse_events_term *term; 1186 1187 list_for_each_entry(term, head_config, list) { 1188 switch (term->type_term) { 1189 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1190 ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak); 1191 break; 1192 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1193 ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak); 1194 break; 1195 case PARSE_EVENTS__TERM_TYPE_TIME: 1196 ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak); 1197 break; 1198 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1199 ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak); 1200 break; 1201 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1202 ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak); 1203 break; 1204 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1205 ADD_CONFIG_TERM_VAL(STACK_USER, stack_user, 1206 term->val.num, term->weak); 1207 break; 1208 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1209 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1210 term->val.num ? 1 : 0, term->weak); 1211 break; 1212 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1213 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1214 term->val.num ? 0 : 1, term->weak); 1215 break; 1216 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1217 ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack, 1218 term->val.num, term->weak); 1219 break; 1220 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1221 ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events, 1222 term->val.num, term->weak); 1223 break; 1224 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1225 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1226 term->val.num ? 1 : 0, term->weak); 1227 break; 1228 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1229 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1230 term->val.num ? 0 : 1, term->weak); 1231 break; 1232 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1233 ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak); 1234 break; 1235 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1236 ADD_CONFIG_TERM_VAL(PERCORE, percore, 1237 term->val.num ? true : false, term->weak); 1238 break; 1239 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1240 ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output, 1241 term->val.num ? 1 : 0, term->weak); 1242 break; 1243 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1244 ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size, 1245 term->val.num, term->weak); 1246 break; 1247 default: 1248 break; 1249 } 1250 } 1251 return 0; 1252 } 1253 1254 /* 1255 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for 1256 * each bit of attr->config that the user has changed. 1257 */ 1258 static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config, 1259 struct list_head *head_terms) 1260 { 1261 struct parse_events_term *term; 1262 u64 bits = 0; 1263 int type; 1264 1265 list_for_each_entry(term, head_config, list) { 1266 switch (term->type_term) { 1267 case PARSE_EVENTS__TERM_TYPE_USER: 1268 type = perf_pmu__format_type(&pmu->format, term->config); 1269 if (type != PERF_PMU_FORMAT_VALUE_CONFIG) 1270 continue; 1271 bits |= perf_pmu__format_bits(&pmu->format, term->config); 1272 break; 1273 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1274 bits = ~(u64)0; 1275 break; 1276 default: 1277 break; 1278 } 1279 } 1280 1281 if (bits) 1282 ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false); 1283 1284 #undef ADD_CONFIG_TERM 1285 return 0; 1286 } 1287 1288 int parse_events_add_tracepoint(struct list_head *list, int *idx, 1289 const char *sys, const char *event, 1290 struct parse_events_error *err, 1291 struct list_head *head_config) 1292 { 1293 if (head_config) { 1294 struct perf_event_attr attr; 1295 1296 if (config_attr(&attr, head_config, err, 1297 config_term_tracepoint)) 1298 return -EINVAL; 1299 } 1300 1301 if (strpbrk(sys, "*?")) 1302 return add_tracepoint_multi_sys(list, idx, sys, event, 1303 err, head_config); 1304 else 1305 return add_tracepoint_event(list, idx, sys, event, 1306 err, head_config); 1307 } 1308 1309 int parse_events_add_numeric(struct parse_events_state *parse_state, 1310 struct list_head *list, 1311 u32 type, u64 config, 1312 struct list_head *head_config) 1313 { 1314 struct perf_event_attr attr; 1315 LIST_HEAD(config_terms); 1316 const char *name, *metric_id; 1317 bool hybrid; 1318 int ret; 1319 1320 memset(&attr, 0, sizeof(attr)); 1321 attr.type = type; 1322 attr.config = config; 1323 1324 if (head_config) { 1325 if (config_attr(&attr, head_config, parse_state->error, 1326 config_term_common)) 1327 return -EINVAL; 1328 1329 if (get_config_terms(head_config, &config_terms)) 1330 return -ENOMEM; 1331 } 1332 1333 name = get_config_name(head_config); 1334 metric_id = get_config_metric_id(head_config); 1335 ret = parse_events__add_numeric_hybrid(parse_state, list, &attr, 1336 name, metric_id, 1337 &config_terms, &hybrid); 1338 if (hybrid) 1339 goto out_free_terms; 1340 1341 ret = add_event(list, &parse_state->idx, &attr, name, metric_id, 1342 &config_terms); 1343 out_free_terms: 1344 free_config_terms(&config_terms); 1345 return ret; 1346 } 1347 1348 int parse_events_add_tool(struct parse_events_state *parse_state, 1349 struct list_head *list, 1350 int tool_event) 1351 { 1352 return add_event_tool(list, &parse_state->idx, tool_event); 1353 } 1354 1355 static bool config_term_percore(struct list_head *config_terms) 1356 { 1357 struct evsel_config_term *term; 1358 1359 list_for_each_entry(term, config_terms, list) { 1360 if (term->type == EVSEL__CONFIG_TERM_PERCORE) 1361 return term->val.percore; 1362 } 1363 1364 return false; 1365 } 1366 1367 static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state, 1368 struct list_head *list, char *name, 1369 struct list_head *head_config) 1370 { 1371 struct parse_events_term *term; 1372 int ret = -1; 1373 1374 if (parse_state->fake_pmu || !head_config || list_empty(head_config) || 1375 !perf_pmu__is_hybrid(name)) { 1376 return -1; 1377 } 1378 1379 /* 1380 * More than one term in list. 1381 */ 1382 if (head_config->next && head_config->next->next != head_config) 1383 return -1; 1384 1385 term = list_first_entry(head_config, struct parse_events_term, list); 1386 if (term && term->config && strcmp(term->config, "event")) { 1387 ret = parse_events__with_hybrid_pmu(parse_state, term->config, 1388 name, list); 1389 } 1390 1391 return ret; 1392 } 1393 1394 int parse_events_add_pmu(struct parse_events_state *parse_state, 1395 struct list_head *list, char *name, 1396 struct list_head *head_config, 1397 bool auto_merge_stats, 1398 bool use_alias) 1399 { 1400 struct perf_event_attr attr; 1401 struct perf_pmu_info info; 1402 struct perf_pmu *pmu; 1403 struct evsel *evsel; 1404 struct parse_events_error *err = parse_state->error; 1405 bool use_uncore_alias; 1406 LIST_HEAD(config_terms); 1407 1408 pmu = parse_state->fake_pmu ?: perf_pmu__find(name); 1409 1410 if (verbose > 1 && !(pmu && pmu->selectable)) { 1411 fprintf(stderr, "Attempting to add event pmu '%s' with '", 1412 name); 1413 if (head_config) { 1414 struct parse_events_term *term; 1415 1416 list_for_each_entry(term, head_config, list) { 1417 fprintf(stderr, "%s,", term->config); 1418 } 1419 } 1420 fprintf(stderr, "' that may result in non-fatal errors\n"); 1421 } 1422 1423 if (!pmu) { 1424 char *err_str; 1425 1426 if (asprintf(&err_str, 1427 "Cannot find PMU `%s'. Missing kernel support?", 1428 name) >= 0) 1429 parse_events_error__handle(err, 0, err_str, NULL); 1430 return -EINVAL; 1431 } 1432 1433 if (pmu->default_config) { 1434 memcpy(&attr, pmu->default_config, 1435 sizeof(struct perf_event_attr)); 1436 } else { 1437 memset(&attr, 0, sizeof(attr)); 1438 } 1439 1440 use_uncore_alias = (pmu->is_uncore && use_alias); 1441 1442 if (!head_config) { 1443 attr.type = pmu->type; 1444 evsel = __add_event(list, &parse_state->idx, &attr, 1445 /*init_attr=*/true, /*name=*/NULL, 1446 /*metric_id=*/NULL, pmu, 1447 /*config_terms=*/NULL, auto_merge_stats, 1448 /*cpu_list=*/NULL); 1449 if (evsel) { 1450 evsel->pmu_name = name ? strdup(name) : NULL; 1451 evsel->use_uncore_alias = use_uncore_alias; 1452 return 0; 1453 } else { 1454 return -ENOMEM; 1455 } 1456 } 1457 1458 if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info)) 1459 return -EINVAL; 1460 1461 if (verbose > 1) { 1462 fprintf(stderr, "After aliases, add event pmu '%s' with '", 1463 name); 1464 if (head_config) { 1465 struct parse_events_term *term; 1466 1467 list_for_each_entry(term, head_config, list) { 1468 fprintf(stderr, "%s,", term->config); 1469 } 1470 } 1471 fprintf(stderr, "' that may result in non-fatal errors\n"); 1472 } 1473 1474 /* 1475 * Configure hardcoded terms first, no need to check 1476 * return value when called with fail == 0 ;) 1477 */ 1478 if (config_attr(&attr, head_config, parse_state->error, config_term_pmu)) 1479 return -EINVAL; 1480 1481 if (get_config_terms(head_config, &config_terms)) 1482 return -ENOMEM; 1483 1484 /* 1485 * When using default config, record which bits of attr->config were 1486 * changed by the user. 1487 */ 1488 if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms)) 1489 return -ENOMEM; 1490 1491 if (!parse_events__inside_hybrid_pmu(parse_state, list, name, 1492 head_config)) { 1493 return 0; 1494 } 1495 1496 if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) { 1497 free_config_terms(&config_terms); 1498 return -EINVAL; 1499 } 1500 1501 evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true, 1502 get_config_name(head_config), 1503 get_config_metric_id(head_config), pmu, 1504 &config_terms, auto_merge_stats, /*cpu_list=*/NULL); 1505 if (!evsel) 1506 return -ENOMEM; 1507 1508 if (evsel->name) 1509 evsel->use_config_name = true; 1510 1511 evsel->pmu_name = name ? strdup(name) : NULL; 1512 evsel->use_uncore_alias = use_uncore_alias; 1513 evsel->percore = config_term_percore(&evsel->config_terms); 1514 1515 if (parse_state->fake_pmu) 1516 return 0; 1517 1518 free((char *)evsel->unit); 1519 evsel->unit = strdup(info.unit); 1520 evsel->scale = info.scale; 1521 evsel->per_pkg = info.per_pkg; 1522 evsel->snapshot = info.snapshot; 1523 evsel->metric_expr = info.metric_expr; 1524 evsel->metric_name = info.metric_name; 1525 return 0; 1526 } 1527 1528 int parse_events_multi_pmu_add(struct parse_events_state *parse_state, 1529 char *str, struct list_head *head, 1530 struct list_head **listp) 1531 { 1532 struct parse_events_term *term; 1533 struct list_head *list = NULL; 1534 struct list_head *orig_head = NULL; 1535 struct perf_pmu *pmu = NULL; 1536 int ok = 0; 1537 char *config; 1538 1539 *listp = NULL; 1540 1541 if (!head) { 1542 head = malloc(sizeof(struct list_head)); 1543 if (!head) 1544 goto out_err; 1545 1546 INIT_LIST_HEAD(head); 1547 } 1548 config = strdup(str); 1549 if (!config) 1550 goto out_err; 1551 1552 if (parse_events_term__num(&term, 1553 PARSE_EVENTS__TERM_TYPE_USER, 1554 config, 1, false, &config, 1555 NULL) < 0) { 1556 free(config); 1557 goto out_err; 1558 } 1559 list_add_tail(&term->list, head); 1560 1561 /* Add it for all PMUs that support the alias */ 1562 list = malloc(sizeof(struct list_head)); 1563 if (!list) 1564 goto out_err; 1565 1566 INIT_LIST_HEAD(list); 1567 1568 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 1569 struct perf_pmu_alias *alias; 1570 1571 list_for_each_entry(alias, &pmu->aliases, list) { 1572 if (!strcasecmp(alias->name, str)) { 1573 parse_events_copy_term_list(head, &orig_head); 1574 if (!parse_events_add_pmu(parse_state, list, 1575 pmu->name, orig_head, 1576 true, true)) { 1577 pr_debug("%s -> %s/%s/\n", str, 1578 pmu->name, alias->str); 1579 ok++; 1580 } 1581 parse_events_terms__delete(orig_head); 1582 } 1583 } 1584 } 1585 1586 if (parse_state->fake_pmu) { 1587 if (!parse_events_add_pmu(parse_state, list, str, head, 1588 true, true)) { 1589 pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str); 1590 ok++; 1591 } 1592 } 1593 1594 out_err: 1595 if (ok) 1596 *listp = list; 1597 else 1598 free(list); 1599 1600 parse_events_terms__delete(head); 1601 return ok ? 0 : -1; 1602 } 1603 1604 int parse_events__modifier_group(struct list_head *list, 1605 char *event_mod) 1606 { 1607 return parse_events__modifier_event(list, event_mod, true); 1608 } 1609 1610 /* 1611 * Check if the two uncore PMUs are from the same uncore block 1612 * The format of the uncore PMU name is uncore_#blockname_#pmuidx 1613 */ 1614 static bool is_same_uncore_block(const char *pmu_name_a, const char *pmu_name_b) 1615 { 1616 char *end_a, *end_b; 1617 1618 end_a = strrchr(pmu_name_a, '_'); 1619 end_b = strrchr(pmu_name_b, '_'); 1620 1621 if (!end_a || !end_b) 1622 return false; 1623 1624 if ((end_a - pmu_name_a) != (end_b - pmu_name_b)) 1625 return false; 1626 1627 return (strncmp(pmu_name_a, pmu_name_b, end_a - pmu_name_a) == 0); 1628 } 1629 1630 static int 1631 parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list, 1632 struct parse_events_state *parse_state) 1633 { 1634 struct evsel *evsel, *leader; 1635 uintptr_t *leaders; 1636 bool is_leader = true; 1637 int i, nr_pmu = 0, total_members, ret = 0; 1638 1639 leader = list_first_entry(list, struct evsel, core.node); 1640 evsel = list_last_entry(list, struct evsel, core.node); 1641 total_members = evsel->core.idx - leader->core.idx + 1; 1642 1643 leaders = calloc(total_members, sizeof(uintptr_t)); 1644 if (WARN_ON(!leaders)) 1645 return 0; 1646 1647 /* 1648 * Going through the whole group and doing sanity check. 1649 * All members must use alias, and be from the same uncore block. 1650 * Also, storing the leader events in an array. 1651 */ 1652 __evlist__for_each_entry(list, evsel) { 1653 1654 /* Only split the uncore group which members use alias */ 1655 if (!evsel->use_uncore_alias) 1656 goto out; 1657 1658 /* The events must be from the same uncore block */ 1659 if (!is_same_uncore_block(leader->pmu_name, evsel->pmu_name)) 1660 goto out; 1661 1662 if (!is_leader) 1663 continue; 1664 /* 1665 * If the event's PMU name starts to repeat, it must be a new 1666 * event. That can be used to distinguish the leader from 1667 * other members, even they have the same event name. 1668 */ 1669 if ((leader != evsel) && 1670 !strcmp(leader->pmu_name, evsel->pmu_name)) { 1671 is_leader = false; 1672 continue; 1673 } 1674 1675 /* Store the leader event for each PMU */ 1676 leaders[nr_pmu++] = (uintptr_t) evsel; 1677 } 1678 1679 /* only one event alias */ 1680 if (nr_pmu == total_members) { 1681 parse_state->nr_groups--; 1682 goto handled; 1683 } 1684 1685 /* 1686 * An uncore event alias is a joint name which means the same event 1687 * runs on all PMUs of a block. 1688 * Perf doesn't support mixed events from different PMUs in the same 1689 * group. The big group has to be split into multiple small groups 1690 * which only include the events from the same PMU. 1691 * 1692 * Here the uncore event aliases must be from the same uncore block. 1693 * The number of PMUs must be same for each alias. The number of new 1694 * small groups equals to the number of PMUs. 1695 * Setting the leader event for corresponding members in each group. 1696 */ 1697 i = 0; 1698 __evlist__for_each_entry(list, evsel) { 1699 if (i >= nr_pmu) 1700 i = 0; 1701 evsel__set_leader(evsel, (struct evsel *) leaders[i++]); 1702 } 1703 1704 /* The number of members and group name are same for each group */ 1705 for (i = 0; i < nr_pmu; i++) { 1706 evsel = (struct evsel *) leaders[i]; 1707 evsel->core.nr_members = total_members / nr_pmu; 1708 evsel->group_name = name ? strdup(name) : NULL; 1709 } 1710 1711 /* Take the new small groups into account */ 1712 parse_state->nr_groups += nr_pmu - 1; 1713 1714 handled: 1715 ret = 1; 1716 out: 1717 free(leaders); 1718 return ret; 1719 } 1720 1721 __weak struct evsel *arch_evlist__leader(struct list_head *list) 1722 { 1723 return list_first_entry(list, struct evsel, core.node); 1724 } 1725 1726 void parse_events__set_leader(char *name, struct list_head *list, 1727 struct parse_events_state *parse_state) 1728 { 1729 struct evsel *leader; 1730 1731 if (list_empty(list)) { 1732 WARN_ONCE(true, "WARNING: failed to set leader: empty list"); 1733 return; 1734 } 1735 1736 if (parse_events__set_leader_for_uncore_aliase(name, list, parse_state)) 1737 return; 1738 1739 leader = arch_evlist__leader(list); 1740 __perf_evlist__set_leader(list, &leader->core); 1741 leader->group_name = name ? strdup(name) : NULL; 1742 list_move(&leader->core.node, list); 1743 } 1744 1745 /* list_event is assumed to point to malloc'ed memory */ 1746 void parse_events_update_lists(struct list_head *list_event, 1747 struct list_head *list_all) 1748 { 1749 /* 1750 * Called for single event definition. Update the 1751 * 'all event' list, and reinit the 'single event' 1752 * list, for next event definition. 1753 */ 1754 list_splice_tail(list_event, list_all); 1755 free(list_event); 1756 } 1757 1758 struct event_modifier { 1759 int eu; 1760 int ek; 1761 int eh; 1762 int eH; 1763 int eG; 1764 int eI; 1765 int precise; 1766 int precise_max; 1767 int exclude_GH; 1768 int sample_read; 1769 int pinned; 1770 int weak; 1771 int exclusive; 1772 int bpf_counter; 1773 }; 1774 1775 static int get_event_modifier(struct event_modifier *mod, char *str, 1776 struct evsel *evsel) 1777 { 1778 int eu = evsel ? evsel->core.attr.exclude_user : 0; 1779 int ek = evsel ? evsel->core.attr.exclude_kernel : 0; 1780 int eh = evsel ? evsel->core.attr.exclude_hv : 0; 1781 int eH = evsel ? evsel->core.attr.exclude_host : 0; 1782 int eG = evsel ? evsel->core.attr.exclude_guest : 0; 1783 int eI = evsel ? evsel->core.attr.exclude_idle : 0; 1784 int precise = evsel ? evsel->core.attr.precise_ip : 0; 1785 int precise_max = 0; 1786 int sample_read = 0; 1787 int pinned = evsel ? evsel->core.attr.pinned : 0; 1788 int exclusive = evsel ? evsel->core.attr.exclusive : 0; 1789 1790 int exclude = eu | ek | eh; 1791 int exclude_GH = evsel ? evsel->exclude_GH : 0; 1792 int weak = 0; 1793 int bpf_counter = 0; 1794 1795 memset(mod, 0, sizeof(*mod)); 1796 1797 while (*str) { 1798 if (*str == 'u') { 1799 if (!exclude) 1800 exclude = eu = ek = eh = 1; 1801 if (!exclude_GH && !perf_guest) 1802 eG = 1; 1803 eu = 0; 1804 } else if (*str == 'k') { 1805 if (!exclude) 1806 exclude = eu = ek = eh = 1; 1807 ek = 0; 1808 } else if (*str == 'h') { 1809 if (!exclude) 1810 exclude = eu = ek = eh = 1; 1811 eh = 0; 1812 } else if (*str == 'G') { 1813 if (!exclude_GH) 1814 exclude_GH = eG = eH = 1; 1815 eG = 0; 1816 } else if (*str == 'H') { 1817 if (!exclude_GH) 1818 exclude_GH = eG = eH = 1; 1819 eH = 0; 1820 } else if (*str == 'I') { 1821 eI = 1; 1822 } else if (*str == 'p') { 1823 precise++; 1824 /* use of precise requires exclude_guest */ 1825 if (!exclude_GH) 1826 eG = 1; 1827 } else if (*str == 'P') { 1828 precise_max = 1; 1829 } else if (*str == 'S') { 1830 sample_read = 1; 1831 } else if (*str == 'D') { 1832 pinned = 1; 1833 } else if (*str == 'e') { 1834 exclusive = 1; 1835 } else if (*str == 'W') { 1836 weak = 1; 1837 } else if (*str == 'b') { 1838 bpf_counter = 1; 1839 } else 1840 break; 1841 1842 ++str; 1843 } 1844 1845 /* 1846 * precise ip: 1847 * 1848 * 0 - SAMPLE_IP can have arbitrary skid 1849 * 1 - SAMPLE_IP must have constant skid 1850 * 2 - SAMPLE_IP requested to have 0 skid 1851 * 3 - SAMPLE_IP must have 0 skid 1852 * 1853 * See also PERF_RECORD_MISC_EXACT_IP 1854 */ 1855 if (precise > 3) 1856 return -EINVAL; 1857 1858 mod->eu = eu; 1859 mod->ek = ek; 1860 mod->eh = eh; 1861 mod->eH = eH; 1862 mod->eG = eG; 1863 mod->eI = eI; 1864 mod->precise = precise; 1865 mod->precise_max = precise_max; 1866 mod->exclude_GH = exclude_GH; 1867 mod->sample_read = sample_read; 1868 mod->pinned = pinned; 1869 mod->weak = weak; 1870 mod->bpf_counter = bpf_counter; 1871 mod->exclusive = exclusive; 1872 1873 return 0; 1874 } 1875 1876 /* 1877 * Basic modifier sanity check to validate it contains only one 1878 * instance of any modifier (apart from 'p') present. 1879 */ 1880 static int check_modifier(char *str) 1881 { 1882 char *p = str; 1883 1884 /* The sizeof includes 0 byte as well. */ 1885 if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1)) 1886 return -1; 1887 1888 while (*p) { 1889 if (*p != 'p' && strchr(p + 1, *p)) 1890 return -1; 1891 p++; 1892 } 1893 1894 return 0; 1895 } 1896 1897 int parse_events__modifier_event(struct list_head *list, char *str, bool add) 1898 { 1899 struct evsel *evsel; 1900 struct event_modifier mod; 1901 1902 if (str == NULL) 1903 return 0; 1904 1905 if (check_modifier(str)) 1906 return -EINVAL; 1907 1908 if (!add && get_event_modifier(&mod, str, NULL)) 1909 return -EINVAL; 1910 1911 __evlist__for_each_entry(list, evsel) { 1912 if (add && get_event_modifier(&mod, str, evsel)) 1913 return -EINVAL; 1914 1915 evsel->core.attr.exclude_user = mod.eu; 1916 evsel->core.attr.exclude_kernel = mod.ek; 1917 evsel->core.attr.exclude_hv = mod.eh; 1918 evsel->core.attr.precise_ip = mod.precise; 1919 evsel->core.attr.exclude_host = mod.eH; 1920 evsel->core.attr.exclude_guest = mod.eG; 1921 evsel->core.attr.exclude_idle = mod.eI; 1922 evsel->exclude_GH = mod.exclude_GH; 1923 evsel->sample_read = mod.sample_read; 1924 evsel->precise_max = mod.precise_max; 1925 evsel->weak_group = mod.weak; 1926 evsel->bpf_counter = mod.bpf_counter; 1927 1928 if (evsel__is_group_leader(evsel)) { 1929 evsel->core.attr.pinned = mod.pinned; 1930 evsel->core.attr.exclusive = mod.exclusive; 1931 } 1932 } 1933 1934 return 0; 1935 } 1936 1937 int parse_events_name(struct list_head *list, const char *name) 1938 { 1939 struct evsel *evsel; 1940 1941 __evlist__for_each_entry(list, evsel) { 1942 if (!evsel->name) 1943 evsel->name = strdup(name); 1944 } 1945 1946 return 0; 1947 } 1948 1949 static int 1950 comp_pmu(const void *p1, const void *p2) 1951 { 1952 struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1; 1953 struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2; 1954 1955 return strcasecmp(pmu1->symbol, pmu2->symbol); 1956 } 1957 1958 static void perf_pmu__parse_cleanup(void) 1959 { 1960 if (perf_pmu_events_list_num > 0) { 1961 struct perf_pmu_event_symbol *p; 1962 int i; 1963 1964 for (i = 0; i < perf_pmu_events_list_num; i++) { 1965 p = perf_pmu_events_list + i; 1966 zfree(&p->symbol); 1967 } 1968 zfree(&perf_pmu_events_list); 1969 perf_pmu_events_list_num = 0; 1970 } 1971 } 1972 1973 #define SET_SYMBOL(str, stype) \ 1974 do { \ 1975 p->symbol = str; \ 1976 if (!p->symbol) \ 1977 goto err; \ 1978 p->type = stype; \ 1979 } while (0) 1980 1981 /* 1982 * Read the pmu events list from sysfs 1983 * Save it into perf_pmu_events_list 1984 */ 1985 static void perf_pmu__parse_init(void) 1986 { 1987 1988 struct perf_pmu *pmu = NULL; 1989 struct perf_pmu_alias *alias; 1990 int len = 0; 1991 1992 pmu = NULL; 1993 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 1994 list_for_each_entry(alias, &pmu->aliases, list) { 1995 char *tmp = strchr(alias->name, '-'); 1996 1997 if (tmp) { 1998 char *tmp2 = NULL; 1999 2000 tmp2 = strchr(tmp + 1, '-'); 2001 len++; 2002 if (tmp2) 2003 len++; 2004 } 2005 2006 len++; 2007 } 2008 } 2009 2010 if (len == 0) { 2011 perf_pmu_events_list_num = -1; 2012 return; 2013 } 2014 perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len); 2015 if (!perf_pmu_events_list) 2016 return; 2017 perf_pmu_events_list_num = len; 2018 2019 len = 0; 2020 pmu = NULL; 2021 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 2022 list_for_each_entry(alias, &pmu->aliases, list) { 2023 struct perf_pmu_event_symbol *p = perf_pmu_events_list + len; 2024 char *tmp = strchr(alias->name, '-'); 2025 char *tmp2 = NULL; 2026 2027 if (tmp) 2028 tmp2 = strchr(tmp + 1, '-'); 2029 if (tmp2) { 2030 SET_SYMBOL(strndup(alias->name, tmp - alias->name), 2031 PMU_EVENT_SYMBOL_PREFIX); 2032 p++; 2033 tmp++; 2034 SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX); 2035 p++; 2036 SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2); 2037 len += 3; 2038 } else if (tmp) { 2039 SET_SYMBOL(strndup(alias->name, tmp - alias->name), 2040 PMU_EVENT_SYMBOL_PREFIX); 2041 p++; 2042 SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX); 2043 len += 2; 2044 } else { 2045 SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL); 2046 len++; 2047 } 2048 } 2049 } 2050 qsort(perf_pmu_events_list, len, 2051 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2052 2053 return; 2054 err: 2055 perf_pmu__parse_cleanup(); 2056 } 2057 2058 /* 2059 * This function injects special term in 2060 * perf_pmu_events_list so the test code 2061 * can check on this functionality. 2062 */ 2063 int perf_pmu__test_parse_init(void) 2064 { 2065 struct perf_pmu_event_symbol *list, *tmp, symbols[] = { 2066 {(char *)"read", PMU_EVENT_SYMBOL}, 2067 {(char *)"event", PMU_EVENT_SYMBOL_PREFIX}, 2068 {(char *)"two", PMU_EVENT_SYMBOL_SUFFIX}, 2069 {(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX}, 2070 {(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2}, 2071 }; 2072 unsigned long i, j; 2073 2074 tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols)); 2075 if (!list) 2076 return -ENOMEM; 2077 2078 for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) { 2079 tmp->type = symbols[i].type; 2080 tmp->symbol = strdup(symbols[i].symbol); 2081 if (!tmp->symbol) 2082 goto err_free; 2083 } 2084 2085 perf_pmu_events_list = list; 2086 perf_pmu_events_list_num = ARRAY_SIZE(symbols); 2087 2088 qsort(perf_pmu_events_list, ARRAY_SIZE(symbols), 2089 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2090 return 0; 2091 2092 err_free: 2093 for (j = 0, tmp = list; j < i; j++, tmp++) 2094 free(tmp->symbol); 2095 free(list); 2096 return -ENOMEM; 2097 } 2098 2099 enum perf_pmu_event_symbol_type 2100 perf_pmu__parse_check(const char *name) 2101 { 2102 struct perf_pmu_event_symbol p, *r; 2103 2104 /* scan kernel pmu events from sysfs if needed */ 2105 if (perf_pmu_events_list_num == 0) 2106 perf_pmu__parse_init(); 2107 /* 2108 * name "cpu" could be prefix of cpu-cycles or cpu// events. 2109 * cpu-cycles has been handled by hardcode. 2110 * So it must be cpu// events, not kernel pmu event. 2111 */ 2112 if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu")) 2113 return PMU_EVENT_SYMBOL_ERR; 2114 2115 p.symbol = strdup(name); 2116 r = bsearch(&p, perf_pmu_events_list, 2117 (size_t) perf_pmu_events_list_num, 2118 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2119 zfree(&p.symbol); 2120 return r ? r->type : PMU_EVENT_SYMBOL_ERR; 2121 } 2122 2123 static int parse_events__scanner(const char *str, 2124 struct parse_events_state *parse_state) 2125 { 2126 YY_BUFFER_STATE buffer; 2127 void *scanner; 2128 int ret; 2129 2130 ret = parse_events_lex_init_extra(parse_state, &scanner); 2131 if (ret) 2132 return ret; 2133 2134 buffer = parse_events__scan_string(str, scanner); 2135 2136 #ifdef PARSER_DEBUG 2137 parse_events_debug = 1; 2138 parse_events_set_debug(1, scanner); 2139 #endif 2140 ret = parse_events_parse(parse_state, scanner); 2141 2142 parse_events__flush_buffer(buffer, scanner); 2143 parse_events__delete_buffer(buffer, scanner); 2144 parse_events_lex_destroy(scanner); 2145 return ret; 2146 } 2147 2148 /* 2149 * parse event config string, return a list of event terms. 2150 */ 2151 int parse_events_terms(struct list_head *terms, const char *str) 2152 { 2153 struct parse_events_state parse_state = { 2154 .terms = NULL, 2155 .stoken = PE_START_TERMS, 2156 }; 2157 int ret; 2158 2159 ret = parse_events__scanner(str, &parse_state); 2160 perf_pmu__parse_cleanup(); 2161 2162 if (!ret) { 2163 list_splice(parse_state.terms, terms); 2164 zfree(&parse_state.terms); 2165 return 0; 2166 } 2167 2168 parse_events_terms__delete(parse_state.terms); 2169 return ret; 2170 } 2171 2172 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state, 2173 const char *str, char *pmu_name, 2174 struct list_head *list) 2175 { 2176 struct parse_events_state ps = { 2177 .list = LIST_HEAD_INIT(ps.list), 2178 .stoken = PE_START_EVENTS, 2179 .hybrid_pmu_name = pmu_name, 2180 .idx = parse_state->idx, 2181 }; 2182 int ret; 2183 2184 ret = parse_events__scanner(str, &ps); 2185 perf_pmu__parse_cleanup(); 2186 2187 if (!ret) { 2188 if (!list_empty(&ps.list)) { 2189 list_splice(&ps.list, list); 2190 parse_state->idx = ps.idx; 2191 return 0; 2192 } else 2193 return -1; 2194 } 2195 2196 return ret; 2197 } 2198 2199 int __parse_events(struct evlist *evlist, const char *str, 2200 struct parse_events_error *err, struct perf_pmu *fake_pmu) 2201 { 2202 struct parse_events_state parse_state = { 2203 .list = LIST_HEAD_INIT(parse_state.list), 2204 .idx = evlist->core.nr_entries, 2205 .error = err, 2206 .evlist = evlist, 2207 .stoken = PE_START_EVENTS, 2208 .fake_pmu = fake_pmu, 2209 }; 2210 int ret; 2211 2212 ret = parse_events__scanner(str, &parse_state); 2213 perf_pmu__parse_cleanup(); 2214 2215 if (!ret && list_empty(&parse_state.list)) { 2216 WARN_ONCE(true, "WARNING: event parser found nothing\n"); 2217 return -1; 2218 } 2219 2220 /* 2221 * Add list to the evlist even with errors to allow callers to clean up. 2222 */ 2223 evlist__splice_list_tail(evlist, &parse_state.list); 2224 2225 if (!ret) { 2226 struct evsel *last; 2227 2228 evlist->core.nr_groups += parse_state.nr_groups; 2229 last = evlist__last(evlist); 2230 last->cmdline_group_boundary = true; 2231 2232 return 0; 2233 } 2234 2235 /* 2236 * There are 2 users - builtin-record and builtin-test objects. 2237 * Both call evlist__delete in case of error, so we dont 2238 * need to bother. 2239 */ 2240 return ret; 2241 } 2242 2243 int parse_event(struct evlist *evlist, const char *str) 2244 { 2245 struct parse_events_error err; 2246 int ret; 2247 2248 parse_events_error__init(&err); 2249 ret = parse_events(evlist, str, &err); 2250 parse_events_error__exit(&err); 2251 return ret; 2252 } 2253 2254 void parse_events_error__init(struct parse_events_error *err) 2255 { 2256 bzero(err, sizeof(*err)); 2257 } 2258 2259 void parse_events_error__exit(struct parse_events_error *err) 2260 { 2261 zfree(&err->str); 2262 zfree(&err->help); 2263 zfree(&err->first_str); 2264 zfree(&err->first_help); 2265 } 2266 2267 void parse_events_error__handle(struct parse_events_error *err, int idx, 2268 char *str, char *help) 2269 { 2270 if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n")) 2271 goto out_free; 2272 switch (err->num_errors) { 2273 case 0: 2274 err->idx = idx; 2275 err->str = str; 2276 err->help = help; 2277 break; 2278 case 1: 2279 err->first_idx = err->idx; 2280 err->idx = idx; 2281 err->first_str = err->str; 2282 err->str = str; 2283 err->first_help = err->help; 2284 err->help = help; 2285 break; 2286 default: 2287 pr_debug("Multiple errors dropping message: %s (%s)\n", 2288 err->str, err->help); 2289 free(err->str); 2290 err->str = str; 2291 free(err->help); 2292 err->help = help; 2293 break; 2294 } 2295 err->num_errors++; 2296 return; 2297 2298 out_free: 2299 free(str); 2300 free(help); 2301 } 2302 2303 #define MAX_WIDTH 1000 2304 static int get_term_width(void) 2305 { 2306 struct winsize ws; 2307 2308 get_term_dimensions(&ws); 2309 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col; 2310 } 2311 2312 static void __parse_events_error__print(int err_idx, const char *err_str, 2313 const char *err_help, const char *event) 2314 { 2315 const char *str = "invalid or unsupported event: "; 2316 char _buf[MAX_WIDTH]; 2317 char *buf = (char *) event; 2318 int idx = 0; 2319 if (err_str) { 2320 /* -2 for extra '' in the final fprintf */ 2321 int width = get_term_width() - 2; 2322 int len_event = strlen(event); 2323 int len_str, max_len, cut = 0; 2324 2325 /* 2326 * Maximum error index indent, we will cut 2327 * the event string if it's bigger. 2328 */ 2329 int max_err_idx = 13; 2330 2331 /* 2332 * Let's be specific with the message when 2333 * we have the precise error. 2334 */ 2335 str = "event syntax error: "; 2336 len_str = strlen(str); 2337 max_len = width - len_str; 2338 2339 buf = _buf; 2340 2341 /* We're cutting from the beginning. */ 2342 if (err_idx > max_err_idx) 2343 cut = err_idx - max_err_idx; 2344 2345 strncpy(buf, event + cut, max_len); 2346 2347 /* Mark cut parts with '..' on both sides. */ 2348 if (cut) 2349 buf[0] = buf[1] = '.'; 2350 2351 if ((len_event - cut) > max_len) { 2352 buf[max_len - 1] = buf[max_len - 2] = '.'; 2353 buf[max_len] = 0; 2354 } 2355 2356 idx = len_str + err_idx - cut; 2357 } 2358 2359 fprintf(stderr, "%s'%s'\n", str, buf); 2360 if (idx) { 2361 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str); 2362 if (err_help) 2363 fprintf(stderr, "\n%s\n", err_help); 2364 } 2365 } 2366 2367 void parse_events_error__print(struct parse_events_error *err, 2368 const char *event) 2369 { 2370 if (!err->num_errors) 2371 return; 2372 2373 __parse_events_error__print(err->idx, err->str, err->help, event); 2374 2375 if (err->num_errors > 1) { 2376 fputs("\nInitial error:\n", stderr); 2377 __parse_events_error__print(err->first_idx, err->first_str, 2378 err->first_help, event); 2379 } 2380 } 2381 2382 #undef MAX_WIDTH 2383 2384 int parse_events_option(const struct option *opt, const char *str, 2385 int unset __maybe_unused) 2386 { 2387 struct evlist *evlist = *(struct evlist **)opt->value; 2388 struct parse_events_error err; 2389 int ret; 2390 2391 parse_events_error__init(&err); 2392 ret = parse_events(evlist, str, &err); 2393 2394 if (ret) { 2395 parse_events_error__print(&err, str); 2396 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 2397 } 2398 parse_events_error__exit(&err); 2399 2400 return ret; 2401 } 2402 2403 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset) 2404 { 2405 struct evlist **evlistp = opt->value; 2406 int ret; 2407 2408 if (*evlistp == NULL) { 2409 *evlistp = evlist__new(); 2410 2411 if (*evlistp == NULL) { 2412 fprintf(stderr, "Not enough memory to create evlist\n"); 2413 return -1; 2414 } 2415 } 2416 2417 ret = parse_events_option(opt, str, unset); 2418 if (ret) { 2419 evlist__delete(*evlistp); 2420 *evlistp = NULL; 2421 } 2422 2423 return ret; 2424 } 2425 2426 static int 2427 foreach_evsel_in_last_glob(struct evlist *evlist, 2428 int (*func)(struct evsel *evsel, 2429 const void *arg), 2430 const void *arg) 2431 { 2432 struct evsel *last = NULL; 2433 int err; 2434 2435 /* 2436 * Don't return when list_empty, give func a chance to report 2437 * error when it found last == NULL. 2438 * 2439 * So no need to WARN here, let *func do this. 2440 */ 2441 if (evlist->core.nr_entries > 0) 2442 last = evlist__last(evlist); 2443 2444 do { 2445 err = (*func)(last, arg); 2446 if (err) 2447 return -1; 2448 if (!last) 2449 return 0; 2450 2451 if (last->core.node.prev == &evlist->core.entries) 2452 return 0; 2453 last = list_entry(last->core.node.prev, struct evsel, core.node); 2454 } while (!last->cmdline_group_boundary); 2455 2456 return 0; 2457 } 2458 2459 static int set_filter(struct evsel *evsel, const void *arg) 2460 { 2461 const char *str = arg; 2462 bool found = false; 2463 int nr_addr_filters = 0; 2464 struct perf_pmu *pmu = NULL; 2465 2466 if (evsel == NULL) { 2467 fprintf(stderr, 2468 "--filter option should follow a -e tracepoint or HW tracer option\n"); 2469 return -1; 2470 } 2471 2472 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 2473 if (evsel__append_tp_filter(evsel, str) < 0) { 2474 fprintf(stderr, 2475 "not enough memory to hold filter string\n"); 2476 return -1; 2477 } 2478 2479 return 0; 2480 } 2481 2482 while ((pmu = perf_pmu__scan(pmu)) != NULL) 2483 if (pmu->type == evsel->core.attr.type) { 2484 found = true; 2485 break; 2486 } 2487 2488 if (found) 2489 perf_pmu__scan_file(pmu, "nr_addr_filters", 2490 "%d", &nr_addr_filters); 2491 2492 if (!nr_addr_filters) { 2493 fprintf(stderr, 2494 "This CPU does not support address filtering\n"); 2495 return -1; 2496 } 2497 2498 if (evsel__append_addr_filter(evsel, str) < 0) { 2499 fprintf(stderr, 2500 "not enough memory to hold filter string\n"); 2501 return -1; 2502 } 2503 2504 return 0; 2505 } 2506 2507 int parse_filter(const struct option *opt, const char *str, 2508 int unset __maybe_unused) 2509 { 2510 struct evlist *evlist = *(struct evlist **)opt->value; 2511 2512 return foreach_evsel_in_last_glob(evlist, set_filter, 2513 (const void *)str); 2514 } 2515 2516 static int add_exclude_perf_filter(struct evsel *evsel, 2517 const void *arg __maybe_unused) 2518 { 2519 char new_filter[64]; 2520 2521 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2522 fprintf(stderr, 2523 "--exclude-perf option should follow a -e tracepoint option\n"); 2524 return -1; 2525 } 2526 2527 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); 2528 2529 if (evsel__append_tp_filter(evsel, new_filter) < 0) { 2530 fprintf(stderr, 2531 "not enough memory to hold filter string\n"); 2532 return -1; 2533 } 2534 2535 return 0; 2536 } 2537 2538 int exclude_perf(const struct option *opt, 2539 const char *arg __maybe_unused, 2540 int unset __maybe_unused) 2541 { 2542 struct evlist *evlist = *(struct evlist **)opt->value; 2543 2544 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, 2545 NULL); 2546 } 2547 2548 int parse_events__is_hardcoded_term(struct parse_events_term *term) 2549 { 2550 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 2551 } 2552 2553 static int new_term(struct parse_events_term **_term, 2554 struct parse_events_term *temp, 2555 char *str, u64 num) 2556 { 2557 struct parse_events_term *term; 2558 2559 term = malloc(sizeof(*term)); 2560 if (!term) 2561 return -ENOMEM; 2562 2563 *term = *temp; 2564 INIT_LIST_HEAD(&term->list); 2565 term->weak = false; 2566 2567 switch (term->type_val) { 2568 case PARSE_EVENTS__TERM_TYPE_NUM: 2569 term->val.num = num; 2570 break; 2571 case PARSE_EVENTS__TERM_TYPE_STR: 2572 term->val.str = str; 2573 break; 2574 default: 2575 free(term); 2576 return -EINVAL; 2577 } 2578 2579 *_term = term; 2580 return 0; 2581 } 2582 2583 int parse_events_term__num(struct parse_events_term **term, 2584 int type_term, char *config, u64 num, 2585 bool no_value, 2586 void *loc_term_, void *loc_val_) 2587 { 2588 YYLTYPE *loc_term = loc_term_; 2589 YYLTYPE *loc_val = loc_val_; 2590 2591 struct parse_events_term temp = { 2592 .type_val = PARSE_EVENTS__TERM_TYPE_NUM, 2593 .type_term = type_term, 2594 .config = config ? : strdup(config_term_names[type_term]), 2595 .no_value = no_value, 2596 .err_term = loc_term ? loc_term->first_column : 0, 2597 .err_val = loc_val ? loc_val->first_column : 0, 2598 }; 2599 2600 return new_term(term, &temp, NULL, num); 2601 } 2602 2603 int parse_events_term__str(struct parse_events_term **term, 2604 int type_term, char *config, char *str, 2605 void *loc_term_, void *loc_val_) 2606 { 2607 YYLTYPE *loc_term = loc_term_; 2608 YYLTYPE *loc_val = loc_val_; 2609 2610 struct parse_events_term temp = { 2611 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2612 .type_term = type_term, 2613 .config = config, 2614 .err_term = loc_term ? loc_term->first_column : 0, 2615 .err_val = loc_val ? loc_val->first_column : 0, 2616 }; 2617 2618 return new_term(term, &temp, str, 0); 2619 } 2620 2621 int parse_events_term__sym_hw(struct parse_events_term **term, 2622 char *config, unsigned idx) 2623 { 2624 struct event_symbol *sym; 2625 char *str; 2626 struct parse_events_term temp = { 2627 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2628 .type_term = PARSE_EVENTS__TERM_TYPE_USER, 2629 .config = config, 2630 }; 2631 2632 if (!temp.config) { 2633 temp.config = strdup("event"); 2634 if (!temp.config) 2635 return -ENOMEM; 2636 } 2637 BUG_ON(idx >= PERF_COUNT_HW_MAX); 2638 sym = &event_symbols_hw[idx]; 2639 2640 str = strdup(sym->symbol); 2641 if (!str) 2642 return -ENOMEM; 2643 return new_term(term, &temp, str, 0); 2644 } 2645 2646 int parse_events_term__clone(struct parse_events_term **new, 2647 struct parse_events_term *term) 2648 { 2649 char *str; 2650 struct parse_events_term temp = { 2651 .type_val = term->type_val, 2652 .type_term = term->type_term, 2653 .config = NULL, 2654 .err_term = term->err_term, 2655 .err_val = term->err_val, 2656 }; 2657 2658 if (term->config) { 2659 temp.config = strdup(term->config); 2660 if (!temp.config) 2661 return -ENOMEM; 2662 } 2663 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) 2664 return new_term(new, &temp, NULL, term->val.num); 2665 2666 str = strdup(term->val.str); 2667 if (!str) 2668 return -ENOMEM; 2669 return new_term(new, &temp, str, 0); 2670 } 2671 2672 void parse_events_term__delete(struct parse_events_term *term) 2673 { 2674 if (term->array.nr_ranges) 2675 zfree(&term->array.ranges); 2676 2677 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) 2678 zfree(&term->val.str); 2679 2680 zfree(&term->config); 2681 free(term); 2682 } 2683 2684 int parse_events_copy_term_list(struct list_head *old, 2685 struct list_head **new) 2686 { 2687 struct parse_events_term *term, *n; 2688 int ret; 2689 2690 if (!old) { 2691 *new = NULL; 2692 return 0; 2693 } 2694 2695 *new = malloc(sizeof(struct list_head)); 2696 if (!*new) 2697 return -ENOMEM; 2698 INIT_LIST_HEAD(*new); 2699 2700 list_for_each_entry (term, old, list) { 2701 ret = parse_events_term__clone(&n, term); 2702 if (ret) 2703 return ret; 2704 list_add_tail(&n->list, *new); 2705 } 2706 return 0; 2707 } 2708 2709 void parse_events_terms__purge(struct list_head *terms) 2710 { 2711 struct parse_events_term *term, *h; 2712 2713 list_for_each_entry_safe(term, h, terms, list) { 2714 list_del_init(&term->list); 2715 parse_events_term__delete(term); 2716 } 2717 } 2718 2719 void parse_events_terms__delete(struct list_head *terms) 2720 { 2721 if (!terms) 2722 return; 2723 parse_events_terms__purge(terms); 2724 free(terms); 2725 } 2726 2727 void parse_events__clear_array(struct parse_events_array *a) 2728 { 2729 zfree(&a->ranges); 2730 } 2731 2732 void parse_events_evlist_error(struct parse_events_state *parse_state, 2733 int idx, const char *str) 2734 { 2735 if (!parse_state->error) 2736 return; 2737 2738 parse_events_error__handle(parse_state->error, idx, strdup(str), NULL); 2739 } 2740 2741 static void config_terms_list(char *buf, size_t buf_sz) 2742 { 2743 int i; 2744 bool first = true; 2745 2746 buf[0] = '\0'; 2747 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) { 2748 const char *name = config_term_names[i]; 2749 2750 if (!config_term_avail(i, NULL)) 2751 continue; 2752 if (!name) 2753 continue; 2754 if (name[0] == '<') 2755 continue; 2756 2757 if (strlen(buf) + strlen(name) + 2 >= buf_sz) 2758 return; 2759 2760 if (!first) 2761 strcat(buf, ","); 2762 else 2763 first = false; 2764 strcat(buf, name); 2765 } 2766 } 2767 2768 /* 2769 * Return string contains valid config terms of an event. 2770 * @additional_terms: For terms such as PMU sysfs terms. 2771 */ 2772 char *parse_events_formats_error_string(char *additional_terms) 2773 { 2774 char *str; 2775 /* "no-overwrite" is the longest name */ 2776 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR * 2777 (sizeof("no-overwrite") - 1)]; 2778 2779 config_terms_list(static_terms, sizeof(static_terms)); 2780 /* valid terms */ 2781 if (additional_terms) { 2782 if (asprintf(&str, "valid terms: %s,%s", 2783 additional_terms, static_terms) < 0) 2784 goto fail; 2785 } else { 2786 if (asprintf(&str, "valid terms: %s", static_terms) < 0) 2787 goto fail; 2788 } 2789 return str; 2790 2791 fail: 2792 return NULL; 2793 } 2794 2795 struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx, 2796 struct perf_event_attr *attr, 2797 const char *name, 2798 const char *metric_id, 2799 struct perf_pmu *pmu, 2800 struct list_head *config_terms) 2801 { 2802 return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id, 2803 pmu, config_terms, /*auto_merge_stats=*/false, 2804 /*cpu_list=*/NULL); 2805 } 2806