1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/hw_breakpoint.h> 3 #include <linux/err.h> 4 #include <linux/zalloc.h> 5 #include <dirent.h> 6 #include <errno.h> 7 #include <sys/ioctl.h> 8 #include <sys/param.h> 9 #include "term.h" 10 #include "evlist.h" 11 #include "evsel.h" 12 #include <subcmd/parse-options.h> 13 #include "parse-events.h" 14 #include "string2.h" 15 #include "strlist.h" 16 #include "bpf-loader.h" 17 #include "debug.h" 18 #include <api/fs/tracing_path.h> 19 #include <perf/cpumap.h> 20 #include "parse-events-bison.h" 21 #include "parse-events-flex.h" 22 #include "pmu.h" 23 #include "asm/bug.h" 24 #include "util/parse-branch-options.h" 25 #include "util/evsel_config.h" 26 #include "util/event.h" 27 #include "perf.h" 28 #include "util/parse-events-hybrid.h" 29 #include "util/pmu-hybrid.h" 30 #include "tracepoint.h" 31 #include "thread_map.h" 32 33 #define MAX_NAME_LEN 100 34 35 struct perf_pmu_event_symbol { 36 char *symbol; 37 enum perf_pmu_event_symbol_type type; 38 }; 39 40 #ifdef PARSER_DEBUG 41 extern int parse_events_debug; 42 #endif 43 int parse_events_parse(void *parse_state, void *scanner); 44 static int get_config_terms(struct list_head *head_config, 45 struct list_head *head_terms __maybe_unused); 46 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state, 47 const char *str, char *pmu_name, 48 struct list_head *list); 49 50 static struct perf_pmu_event_symbol *perf_pmu_events_list; 51 /* 52 * The variable indicates the number of supported pmu event symbols. 53 * 0 means not initialized and ready to init 54 * -1 means failed to init, don't try anymore 55 * >0 is the number of supported pmu event symbols 56 */ 57 static int perf_pmu_events_list_num; 58 59 struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { 60 [PERF_COUNT_HW_CPU_CYCLES] = { 61 .symbol = "cpu-cycles", 62 .alias = "cycles", 63 }, 64 [PERF_COUNT_HW_INSTRUCTIONS] = { 65 .symbol = "instructions", 66 .alias = "", 67 }, 68 [PERF_COUNT_HW_CACHE_REFERENCES] = { 69 .symbol = "cache-references", 70 .alias = "", 71 }, 72 [PERF_COUNT_HW_CACHE_MISSES] = { 73 .symbol = "cache-misses", 74 .alias = "", 75 }, 76 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 77 .symbol = "branch-instructions", 78 .alias = "branches", 79 }, 80 [PERF_COUNT_HW_BRANCH_MISSES] = { 81 .symbol = "branch-misses", 82 .alias = "", 83 }, 84 [PERF_COUNT_HW_BUS_CYCLES] = { 85 .symbol = "bus-cycles", 86 .alias = "", 87 }, 88 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = { 89 .symbol = "stalled-cycles-frontend", 90 .alias = "idle-cycles-frontend", 91 }, 92 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = { 93 .symbol = "stalled-cycles-backend", 94 .alias = "idle-cycles-backend", 95 }, 96 [PERF_COUNT_HW_REF_CPU_CYCLES] = { 97 .symbol = "ref-cycles", 98 .alias = "", 99 }, 100 }; 101 102 struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { 103 [PERF_COUNT_SW_CPU_CLOCK] = { 104 .symbol = "cpu-clock", 105 .alias = "", 106 }, 107 [PERF_COUNT_SW_TASK_CLOCK] = { 108 .symbol = "task-clock", 109 .alias = "", 110 }, 111 [PERF_COUNT_SW_PAGE_FAULTS] = { 112 .symbol = "page-faults", 113 .alias = "faults", 114 }, 115 [PERF_COUNT_SW_CONTEXT_SWITCHES] = { 116 .symbol = "context-switches", 117 .alias = "cs", 118 }, 119 [PERF_COUNT_SW_CPU_MIGRATIONS] = { 120 .symbol = "cpu-migrations", 121 .alias = "migrations", 122 }, 123 [PERF_COUNT_SW_PAGE_FAULTS_MIN] = { 124 .symbol = "minor-faults", 125 .alias = "", 126 }, 127 [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = { 128 .symbol = "major-faults", 129 .alias = "", 130 }, 131 [PERF_COUNT_SW_ALIGNMENT_FAULTS] = { 132 .symbol = "alignment-faults", 133 .alias = "", 134 }, 135 [PERF_COUNT_SW_EMULATION_FAULTS] = { 136 .symbol = "emulation-faults", 137 .alias = "", 138 }, 139 [PERF_COUNT_SW_DUMMY] = { 140 .symbol = "dummy", 141 .alias = "", 142 }, 143 [PERF_COUNT_SW_BPF_OUTPUT] = { 144 .symbol = "bpf-output", 145 .alias = "", 146 }, 147 [PERF_COUNT_SW_CGROUP_SWITCHES] = { 148 .symbol = "cgroup-switches", 149 .alias = "", 150 }, 151 }; 152 153 bool is_event_supported(u8 type, u64 config) 154 { 155 bool ret = true; 156 int open_return; 157 struct evsel *evsel; 158 struct perf_event_attr attr = { 159 .type = type, 160 .config = config, 161 .disabled = 1, 162 }; 163 struct perf_thread_map *tmap = thread_map__new_by_tid(0); 164 165 if (tmap == NULL) 166 return false; 167 168 evsel = evsel__new(&attr); 169 if (evsel) { 170 open_return = evsel__open(evsel, NULL, tmap); 171 ret = open_return >= 0; 172 173 if (open_return == -EACCES) { 174 /* 175 * This happens if the paranoid value 176 * /proc/sys/kernel/perf_event_paranoid is set to 2 177 * Re-run with exclude_kernel set; we don't do that 178 * by default as some ARM machines do not support it. 179 * 180 */ 181 evsel->core.attr.exclude_kernel = 1; 182 ret = evsel__open(evsel, NULL, tmap) >= 0; 183 } 184 evsel__delete(evsel); 185 } 186 187 perf_thread_map__put(tmap); 188 return ret; 189 } 190 191 const char *event_type(int type) 192 { 193 switch (type) { 194 case PERF_TYPE_HARDWARE: 195 return "hardware"; 196 197 case PERF_TYPE_SOFTWARE: 198 return "software"; 199 200 case PERF_TYPE_TRACEPOINT: 201 return "tracepoint"; 202 203 case PERF_TYPE_HW_CACHE: 204 return "hardware-cache"; 205 206 default: 207 break; 208 } 209 210 return "unknown"; 211 } 212 213 static char *get_config_str(struct list_head *head_terms, int type_term) 214 { 215 struct parse_events_term *term; 216 217 if (!head_terms) 218 return NULL; 219 220 list_for_each_entry(term, head_terms, list) 221 if (term->type_term == type_term) 222 return term->val.str; 223 224 return NULL; 225 } 226 227 static char *get_config_metric_id(struct list_head *head_terms) 228 { 229 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID); 230 } 231 232 static char *get_config_name(struct list_head *head_terms) 233 { 234 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME); 235 } 236 237 static struct evsel * 238 __add_event(struct list_head *list, int *idx, 239 struct perf_event_attr *attr, 240 bool init_attr, 241 const char *name, const char *metric_id, struct perf_pmu *pmu, 242 struct list_head *config_terms, bool auto_merge_stats, 243 const char *cpu_list) 244 { 245 struct evsel *evsel; 246 struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) : 247 cpu_list ? perf_cpu_map__new(cpu_list) : NULL; 248 249 if (pmu) 250 perf_pmu__warn_invalid_formats(pmu); 251 252 if (pmu && attr->type == PERF_TYPE_RAW) 253 perf_pmu__warn_invalid_config(pmu, attr->config, name); 254 255 if (init_attr) 256 event_attr_init(attr); 257 258 evsel = evsel__new_idx(attr, *idx); 259 if (!evsel) { 260 perf_cpu_map__put(cpus); 261 return NULL; 262 } 263 264 (*idx)++; 265 evsel->core.cpus = cpus; 266 evsel->core.own_cpus = perf_cpu_map__get(cpus); 267 evsel->core.requires_cpu = pmu ? pmu->is_uncore : false; 268 evsel->auto_merge_stats = auto_merge_stats; 269 evsel->pmu = pmu; 270 271 if (name) 272 evsel->name = strdup(name); 273 274 if (metric_id) 275 evsel->metric_id = strdup(metric_id); 276 277 if (config_terms) 278 list_splice_init(config_terms, &evsel->config_terms); 279 280 if (list) 281 list_add_tail(&evsel->core.node, list); 282 283 return evsel; 284 } 285 286 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr, 287 const char *name, const char *metric_id, 288 struct perf_pmu *pmu) 289 { 290 return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name, 291 metric_id, pmu, /*config_terms=*/NULL, 292 /*auto_merge_stats=*/false, /*cpu_list=*/NULL); 293 } 294 295 static int add_event(struct list_head *list, int *idx, 296 struct perf_event_attr *attr, const char *name, 297 const char *metric_id, struct list_head *config_terms) 298 { 299 return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id, 300 /*pmu=*/NULL, config_terms, 301 /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM; 302 } 303 304 static int add_event_tool(struct list_head *list, int *idx, 305 enum perf_tool_event tool_event) 306 { 307 struct evsel *evsel; 308 struct perf_event_attr attr = { 309 .type = PERF_TYPE_SOFTWARE, 310 .config = PERF_COUNT_SW_DUMMY, 311 }; 312 313 evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL, 314 /*metric_id=*/NULL, /*pmu=*/NULL, 315 /*config_terms=*/NULL, /*auto_merge_stats=*/false, 316 /*cpu_list=*/"0"); 317 if (!evsel) 318 return -ENOMEM; 319 evsel->tool_event = tool_event; 320 if (tool_event == PERF_TOOL_DURATION_TIME 321 || tool_event == PERF_TOOL_USER_TIME 322 || tool_event == PERF_TOOL_SYSTEM_TIME) { 323 free((char *)evsel->unit); 324 evsel->unit = strdup("ns"); 325 } 326 return 0; 327 } 328 329 static int parse_aliases(char *str, const char *const names[][EVSEL__MAX_ALIASES], int size) 330 { 331 int i, j; 332 int n, longest = -1; 333 334 for (i = 0; i < size; i++) { 335 for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) { 336 n = strlen(names[i][j]); 337 if (n > longest && !strncasecmp(str, names[i][j], n)) 338 longest = n; 339 } 340 if (longest > 0) 341 return i; 342 } 343 344 return -1; 345 } 346 347 typedef int config_term_func_t(struct perf_event_attr *attr, 348 struct parse_events_term *term, 349 struct parse_events_error *err); 350 static int config_term_common(struct perf_event_attr *attr, 351 struct parse_events_term *term, 352 struct parse_events_error *err); 353 static int config_attr(struct perf_event_attr *attr, 354 struct list_head *head, 355 struct parse_events_error *err, 356 config_term_func_t config_term); 357 358 int parse_events_add_cache(struct list_head *list, int *idx, 359 char *type, char *op_result1, char *op_result2, 360 struct parse_events_error *err, 361 struct list_head *head_config, 362 struct parse_events_state *parse_state) 363 { 364 struct perf_event_attr attr; 365 LIST_HEAD(config_terms); 366 char name[MAX_NAME_LEN]; 367 const char *config_name, *metric_id; 368 int cache_type = -1, cache_op = -1, cache_result = -1; 369 char *op_result[2] = { op_result1, op_result2 }; 370 int i, n, ret; 371 bool hybrid; 372 373 /* 374 * No fallback - if we cannot get a clear cache type 375 * then bail out: 376 */ 377 cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX); 378 if (cache_type == -1) 379 return -EINVAL; 380 381 config_name = get_config_name(head_config); 382 n = snprintf(name, MAX_NAME_LEN, "%s", type); 383 384 for (i = 0; (i < 2) && (op_result[i]); i++) { 385 char *str = op_result[i]; 386 387 n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str); 388 389 if (cache_op == -1) { 390 cache_op = parse_aliases(str, evsel__hw_cache_op, 391 PERF_COUNT_HW_CACHE_OP_MAX); 392 if (cache_op >= 0) { 393 if (!evsel__is_cache_op_valid(cache_type, cache_op)) 394 return -EINVAL; 395 continue; 396 } 397 } 398 399 if (cache_result == -1) { 400 cache_result = parse_aliases(str, evsel__hw_cache_result, 401 PERF_COUNT_HW_CACHE_RESULT_MAX); 402 if (cache_result >= 0) 403 continue; 404 } 405 } 406 407 /* 408 * Fall back to reads: 409 */ 410 if (cache_op == -1) 411 cache_op = PERF_COUNT_HW_CACHE_OP_READ; 412 413 /* 414 * Fall back to accesses: 415 */ 416 if (cache_result == -1) 417 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; 418 419 memset(&attr, 0, sizeof(attr)); 420 attr.config = cache_type | (cache_op << 8) | (cache_result << 16); 421 attr.type = PERF_TYPE_HW_CACHE; 422 423 if (head_config) { 424 if (config_attr(&attr, head_config, err, 425 config_term_common)) 426 return -EINVAL; 427 428 if (get_config_terms(head_config, &config_terms)) 429 return -ENOMEM; 430 } 431 432 metric_id = get_config_metric_id(head_config); 433 ret = parse_events__add_cache_hybrid(list, idx, &attr, 434 config_name ? : name, 435 metric_id, 436 &config_terms, 437 &hybrid, parse_state); 438 if (hybrid) 439 goto out_free_terms; 440 441 ret = add_event(list, idx, &attr, config_name ? : name, metric_id, 442 &config_terms); 443 out_free_terms: 444 free_config_terms(&config_terms); 445 return ret; 446 } 447 448 #ifdef HAVE_LIBTRACEEVENT 449 static void tracepoint_error(struct parse_events_error *e, int err, 450 const char *sys, const char *name) 451 { 452 const char *str; 453 char help[BUFSIZ]; 454 455 if (!e) 456 return; 457 458 /* 459 * We get error directly from syscall errno ( > 0), 460 * or from encoded pointer's error ( < 0). 461 */ 462 err = abs(err); 463 464 switch (err) { 465 case EACCES: 466 str = "can't access trace events"; 467 break; 468 case ENOENT: 469 str = "unknown tracepoint"; 470 break; 471 default: 472 str = "failed to add tracepoint"; 473 break; 474 } 475 476 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name); 477 parse_events_error__handle(e, 0, strdup(str), strdup(help)); 478 } 479 480 static int add_tracepoint(struct list_head *list, int *idx, 481 const char *sys_name, const char *evt_name, 482 struct parse_events_error *err, 483 struct list_head *head_config) 484 { 485 struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++); 486 487 if (IS_ERR(evsel)) { 488 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name); 489 return PTR_ERR(evsel); 490 } 491 492 if (head_config) { 493 LIST_HEAD(config_terms); 494 495 if (get_config_terms(head_config, &config_terms)) 496 return -ENOMEM; 497 list_splice(&config_terms, &evsel->config_terms); 498 } 499 500 list_add_tail(&evsel->core.node, list); 501 return 0; 502 } 503 504 static int add_tracepoint_multi_event(struct list_head *list, int *idx, 505 const char *sys_name, const char *evt_name, 506 struct parse_events_error *err, 507 struct list_head *head_config) 508 { 509 char *evt_path; 510 struct dirent *evt_ent; 511 DIR *evt_dir; 512 int ret = 0, found = 0; 513 514 evt_path = get_events_file(sys_name); 515 if (!evt_path) { 516 tracepoint_error(err, errno, sys_name, evt_name); 517 return -1; 518 } 519 evt_dir = opendir(evt_path); 520 if (!evt_dir) { 521 put_events_file(evt_path); 522 tracepoint_error(err, errno, sys_name, evt_name); 523 return -1; 524 } 525 526 while (!ret && (evt_ent = readdir(evt_dir))) { 527 if (!strcmp(evt_ent->d_name, ".") 528 || !strcmp(evt_ent->d_name, "..") 529 || !strcmp(evt_ent->d_name, "enable") 530 || !strcmp(evt_ent->d_name, "filter")) 531 continue; 532 533 if (!strglobmatch(evt_ent->d_name, evt_name)) 534 continue; 535 536 found++; 537 538 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name, 539 err, head_config); 540 } 541 542 if (!found) { 543 tracepoint_error(err, ENOENT, sys_name, evt_name); 544 ret = -1; 545 } 546 547 put_events_file(evt_path); 548 closedir(evt_dir); 549 return ret; 550 } 551 552 static int add_tracepoint_event(struct list_head *list, int *idx, 553 const char *sys_name, const char *evt_name, 554 struct parse_events_error *err, 555 struct list_head *head_config) 556 { 557 return strpbrk(evt_name, "*?") ? 558 add_tracepoint_multi_event(list, idx, sys_name, evt_name, 559 err, head_config) : 560 add_tracepoint(list, idx, sys_name, evt_name, 561 err, head_config); 562 } 563 564 static int add_tracepoint_multi_sys(struct list_head *list, int *idx, 565 const char *sys_name, const char *evt_name, 566 struct parse_events_error *err, 567 struct list_head *head_config) 568 { 569 struct dirent *events_ent; 570 DIR *events_dir; 571 int ret = 0; 572 573 events_dir = tracing_events__opendir(); 574 if (!events_dir) { 575 tracepoint_error(err, errno, sys_name, evt_name); 576 return -1; 577 } 578 579 while (!ret && (events_ent = readdir(events_dir))) { 580 if (!strcmp(events_ent->d_name, ".") 581 || !strcmp(events_ent->d_name, "..") 582 || !strcmp(events_ent->d_name, "enable") 583 || !strcmp(events_ent->d_name, "header_event") 584 || !strcmp(events_ent->d_name, "header_page")) 585 continue; 586 587 if (!strglobmatch(events_ent->d_name, sys_name)) 588 continue; 589 590 ret = add_tracepoint_event(list, idx, events_ent->d_name, 591 evt_name, err, head_config); 592 } 593 594 closedir(events_dir); 595 return ret; 596 } 597 #endif /* HAVE_LIBTRACEEVENT */ 598 599 #ifdef HAVE_LIBBPF_SUPPORT 600 struct __add_bpf_event_param { 601 struct parse_events_state *parse_state; 602 struct list_head *list; 603 struct list_head *head_config; 604 }; 605 606 static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj, 607 void *_param) 608 { 609 LIST_HEAD(new_evsels); 610 struct __add_bpf_event_param *param = _param; 611 struct parse_events_state *parse_state = param->parse_state; 612 struct list_head *list = param->list; 613 struct evsel *pos; 614 int err; 615 /* 616 * Check if we should add the event, i.e. if it is a TP but starts with a '!', 617 * then don't add the tracepoint, this will be used for something else, like 618 * adding to a BPF_MAP_TYPE_PROG_ARRAY. 619 * 620 * See tools/perf/examples/bpf/augmented_raw_syscalls.c 621 */ 622 if (group[0] == '!') 623 return 0; 624 625 pr_debug("add bpf event %s:%s and attach bpf program %d\n", 626 group, event, fd); 627 628 err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group, 629 event, parse_state->error, 630 param->head_config); 631 if (err) { 632 struct evsel *evsel, *tmp; 633 634 pr_debug("Failed to add BPF event %s:%s\n", 635 group, event); 636 list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) { 637 list_del_init(&evsel->core.node); 638 evsel__delete(evsel); 639 } 640 return err; 641 } 642 pr_debug("adding %s:%s\n", group, event); 643 644 list_for_each_entry(pos, &new_evsels, core.node) { 645 pr_debug("adding %s:%s to %p\n", 646 group, event, pos); 647 pos->bpf_fd = fd; 648 pos->bpf_obj = obj; 649 } 650 list_splice(&new_evsels, list); 651 return 0; 652 } 653 654 int parse_events_load_bpf_obj(struct parse_events_state *parse_state, 655 struct list_head *list, 656 struct bpf_object *obj, 657 struct list_head *head_config) 658 { 659 int err; 660 char errbuf[BUFSIZ]; 661 struct __add_bpf_event_param param = {parse_state, list, head_config}; 662 static bool registered_unprobe_atexit = false; 663 664 if (IS_ERR(obj) || !obj) { 665 snprintf(errbuf, sizeof(errbuf), 666 "Internal error: load bpf obj with NULL"); 667 err = -EINVAL; 668 goto errout; 669 } 670 671 /* 672 * Register atexit handler before calling bpf__probe() so 673 * bpf__probe() don't need to unprobe probe points its already 674 * created when failure. 675 */ 676 if (!registered_unprobe_atexit) { 677 atexit(bpf__clear); 678 registered_unprobe_atexit = true; 679 } 680 681 err = bpf__probe(obj); 682 if (err) { 683 bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf)); 684 goto errout; 685 } 686 687 err = bpf__load(obj); 688 if (err) { 689 bpf__strerror_load(obj, err, errbuf, sizeof(errbuf)); 690 goto errout; 691 } 692 693 err = bpf__foreach_event(obj, add_bpf_event, ¶m); 694 if (err) { 695 snprintf(errbuf, sizeof(errbuf), 696 "Attach events in BPF object failed"); 697 goto errout; 698 } 699 700 return 0; 701 errout: 702 parse_events_error__handle(parse_state->error, 0, 703 strdup(errbuf), strdup("(add -v to see detail)")); 704 return err; 705 } 706 707 static int 708 parse_events_config_bpf(struct parse_events_state *parse_state, 709 struct bpf_object *obj, 710 struct list_head *head_config) 711 { 712 struct parse_events_term *term; 713 int error_pos; 714 715 if (!head_config || list_empty(head_config)) 716 return 0; 717 718 list_for_each_entry(term, head_config, list) { 719 int err; 720 721 if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) { 722 parse_events_error__handle(parse_state->error, term->err_term, 723 strdup("Invalid config term for BPF object"), 724 NULL); 725 return -EINVAL; 726 } 727 728 err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos); 729 if (err) { 730 char errbuf[BUFSIZ]; 731 int idx; 732 733 bpf__strerror_config_obj(obj, term, parse_state->evlist, 734 &error_pos, err, errbuf, 735 sizeof(errbuf)); 736 737 if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE) 738 idx = term->err_val; 739 else 740 idx = term->err_term + error_pos; 741 742 parse_events_error__handle(parse_state->error, idx, 743 strdup(errbuf), 744 strdup( 745 "Hint:\tValid config terms:\n" 746 " \tmap:[<arraymap>].value<indices>=[value]\n" 747 " \tmap:[<eventmap>].event<indices>=[event]\n" 748 "\n" 749 " \twhere <indices> is something like [0,3...5] or [all]\n" 750 " \t(add -v to see detail)")); 751 return err; 752 } 753 } 754 return 0; 755 } 756 757 /* 758 * Split config terms: 759 * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ... 760 * 'call-graph=fp' is 'evt config', should be applied to each 761 * events in bpf.c. 762 * 'map:array.value[0]=1' is 'obj config', should be processed 763 * with parse_events_config_bpf. 764 * 765 * Move object config terms from the first list to obj_head_config. 766 */ 767 static void 768 split_bpf_config_terms(struct list_head *evt_head_config, 769 struct list_head *obj_head_config) 770 { 771 struct parse_events_term *term, *temp; 772 773 /* 774 * Currently, all possible user config term 775 * belong to bpf object. parse_events__is_hardcoded_term() 776 * happens to be a good flag. 777 * 778 * See parse_events_config_bpf() and 779 * config_term_tracepoint(). 780 */ 781 list_for_each_entry_safe(term, temp, evt_head_config, list) 782 if (!parse_events__is_hardcoded_term(term)) 783 list_move_tail(&term->list, obj_head_config); 784 } 785 786 int parse_events_load_bpf(struct parse_events_state *parse_state, 787 struct list_head *list, 788 char *bpf_file_name, 789 bool source, 790 struct list_head *head_config) 791 { 792 int err; 793 struct bpf_object *obj; 794 LIST_HEAD(obj_head_config); 795 796 if (head_config) 797 split_bpf_config_terms(head_config, &obj_head_config); 798 799 obj = bpf__prepare_load(bpf_file_name, source); 800 if (IS_ERR(obj)) { 801 char errbuf[BUFSIZ]; 802 803 err = PTR_ERR(obj); 804 805 if (err == -ENOTSUP) 806 snprintf(errbuf, sizeof(errbuf), 807 "BPF support is not compiled"); 808 else 809 bpf__strerror_prepare_load(bpf_file_name, 810 source, 811 -err, errbuf, 812 sizeof(errbuf)); 813 814 parse_events_error__handle(parse_state->error, 0, 815 strdup(errbuf), strdup("(add -v to see detail)")); 816 return err; 817 } 818 819 err = parse_events_load_bpf_obj(parse_state, list, obj, head_config); 820 if (err) 821 return err; 822 err = parse_events_config_bpf(parse_state, obj, &obj_head_config); 823 824 /* 825 * Caller doesn't know anything about obj_head_config, 826 * so combine them together again before returning. 827 */ 828 if (head_config) 829 list_splice_tail(&obj_head_config, head_config); 830 return err; 831 } 832 #else // HAVE_LIBBPF_SUPPORT 833 int parse_events_load_bpf_obj(struct parse_events_state *parse_state, 834 struct list_head *list __maybe_unused, 835 struct bpf_object *obj __maybe_unused, 836 struct list_head *head_config __maybe_unused) 837 { 838 parse_events_error__handle(parse_state->error, 0, 839 strdup("BPF support is not compiled"), 840 strdup("Make sure libbpf-devel is available at build time.")); 841 return -ENOTSUP; 842 } 843 844 int parse_events_load_bpf(struct parse_events_state *parse_state, 845 struct list_head *list __maybe_unused, 846 char *bpf_file_name __maybe_unused, 847 bool source __maybe_unused, 848 struct list_head *head_config __maybe_unused) 849 { 850 parse_events_error__handle(parse_state->error, 0, 851 strdup("BPF support is not compiled"), 852 strdup("Make sure libbpf-devel is available at build time.")); 853 return -ENOTSUP; 854 } 855 #endif // HAVE_LIBBPF_SUPPORT 856 857 static int 858 parse_breakpoint_type(const char *type, struct perf_event_attr *attr) 859 { 860 int i; 861 862 for (i = 0; i < 3; i++) { 863 if (!type || !type[i]) 864 break; 865 866 #define CHECK_SET_TYPE(bit) \ 867 do { \ 868 if (attr->bp_type & bit) \ 869 return -EINVAL; \ 870 else \ 871 attr->bp_type |= bit; \ 872 } while (0) 873 874 switch (type[i]) { 875 case 'r': 876 CHECK_SET_TYPE(HW_BREAKPOINT_R); 877 break; 878 case 'w': 879 CHECK_SET_TYPE(HW_BREAKPOINT_W); 880 break; 881 case 'x': 882 CHECK_SET_TYPE(HW_BREAKPOINT_X); 883 break; 884 default: 885 return -EINVAL; 886 } 887 } 888 889 #undef CHECK_SET_TYPE 890 891 if (!attr->bp_type) /* Default */ 892 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; 893 894 return 0; 895 } 896 897 int parse_events_add_breakpoint(struct list_head *list, int *idx, 898 u64 addr, char *type, u64 len) 899 { 900 struct perf_event_attr attr; 901 902 memset(&attr, 0, sizeof(attr)); 903 attr.bp_addr = addr; 904 905 if (parse_breakpoint_type(type, &attr)) 906 return -EINVAL; 907 908 /* Provide some defaults if len is not specified */ 909 if (!len) { 910 if (attr.bp_type == HW_BREAKPOINT_X) 911 len = sizeof(long); 912 else 913 len = HW_BREAKPOINT_LEN_4; 914 } 915 916 attr.bp_len = len; 917 918 attr.type = PERF_TYPE_BREAKPOINT; 919 attr.sample_period = 1; 920 921 return add_event(list, idx, &attr, /*name=*/NULL, /*mertic_id=*/NULL, 922 /*config_terms=*/NULL); 923 } 924 925 static int check_type_val(struct parse_events_term *term, 926 struct parse_events_error *err, 927 int type) 928 { 929 if (type == term->type_val) 930 return 0; 931 932 if (err) { 933 parse_events_error__handle(err, term->err_val, 934 type == PARSE_EVENTS__TERM_TYPE_NUM 935 ? strdup("expected numeric value") 936 : strdup("expected string value"), 937 NULL); 938 } 939 return -EINVAL; 940 } 941 942 /* 943 * Update according to parse-events.l 944 */ 945 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = { 946 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>", 947 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config", 948 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1", 949 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2", 950 [PARSE_EVENTS__TERM_TYPE_NAME] = "name", 951 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period", 952 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq", 953 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type", 954 [PARSE_EVENTS__TERM_TYPE_TIME] = "time", 955 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph", 956 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size", 957 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit", 958 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit", 959 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack", 960 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr", 961 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite", 962 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite", 963 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config", 964 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore", 965 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output", 966 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size", 967 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id", 968 }; 969 970 static bool config_term_shrinked; 971 972 static bool 973 config_term_avail(int term_type, struct parse_events_error *err) 974 { 975 char *err_str; 976 977 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) { 978 parse_events_error__handle(err, -1, 979 strdup("Invalid term_type"), NULL); 980 return false; 981 } 982 if (!config_term_shrinked) 983 return true; 984 985 switch (term_type) { 986 case PARSE_EVENTS__TERM_TYPE_CONFIG: 987 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 988 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 989 case PARSE_EVENTS__TERM_TYPE_NAME: 990 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 991 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 992 case PARSE_EVENTS__TERM_TYPE_PERCORE: 993 return true; 994 default: 995 if (!err) 996 return false; 997 998 /* term_type is validated so indexing is safe */ 999 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'", 1000 config_term_names[term_type]) >= 0) 1001 parse_events_error__handle(err, -1, err_str, NULL); 1002 return false; 1003 } 1004 } 1005 1006 void parse_events__shrink_config_terms(void) 1007 { 1008 config_term_shrinked = true; 1009 } 1010 1011 static int config_term_common(struct perf_event_attr *attr, 1012 struct parse_events_term *term, 1013 struct parse_events_error *err) 1014 { 1015 #define CHECK_TYPE_VAL(type) \ 1016 do { \ 1017 if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \ 1018 return -EINVAL; \ 1019 } while (0) 1020 1021 switch (term->type_term) { 1022 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1023 CHECK_TYPE_VAL(NUM); 1024 attr->config = term->val.num; 1025 break; 1026 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 1027 CHECK_TYPE_VAL(NUM); 1028 attr->config1 = term->val.num; 1029 break; 1030 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 1031 CHECK_TYPE_VAL(NUM); 1032 attr->config2 = term->val.num; 1033 break; 1034 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1035 CHECK_TYPE_VAL(NUM); 1036 break; 1037 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1038 CHECK_TYPE_VAL(NUM); 1039 break; 1040 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1041 CHECK_TYPE_VAL(STR); 1042 if (strcmp(term->val.str, "no") && 1043 parse_branch_str(term->val.str, 1044 &attr->branch_sample_type)) { 1045 parse_events_error__handle(err, term->err_val, 1046 strdup("invalid branch sample type"), 1047 NULL); 1048 return -EINVAL; 1049 } 1050 break; 1051 case PARSE_EVENTS__TERM_TYPE_TIME: 1052 CHECK_TYPE_VAL(NUM); 1053 if (term->val.num > 1) { 1054 parse_events_error__handle(err, term->err_val, 1055 strdup("expected 0 or 1"), 1056 NULL); 1057 return -EINVAL; 1058 } 1059 break; 1060 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1061 CHECK_TYPE_VAL(STR); 1062 break; 1063 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1064 CHECK_TYPE_VAL(NUM); 1065 break; 1066 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1067 CHECK_TYPE_VAL(NUM); 1068 break; 1069 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1070 CHECK_TYPE_VAL(NUM); 1071 break; 1072 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1073 CHECK_TYPE_VAL(NUM); 1074 break; 1075 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1076 CHECK_TYPE_VAL(NUM); 1077 break; 1078 case PARSE_EVENTS__TERM_TYPE_NAME: 1079 CHECK_TYPE_VAL(STR); 1080 break; 1081 case PARSE_EVENTS__TERM_TYPE_METRIC_ID: 1082 CHECK_TYPE_VAL(STR); 1083 break; 1084 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1085 CHECK_TYPE_VAL(NUM); 1086 break; 1087 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1088 CHECK_TYPE_VAL(NUM); 1089 break; 1090 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1091 CHECK_TYPE_VAL(NUM); 1092 if ((unsigned int)term->val.num > 1) { 1093 parse_events_error__handle(err, term->err_val, 1094 strdup("expected 0 or 1"), 1095 NULL); 1096 return -EINVAL; 1097 } 1098 break; 1099 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1100 CHECK_TYPE_VAL(NUM); 1101 break; 1102 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1103 CHECK_TYPE_VAL(NUM); 1104 if (term->val.num > UINT_MAX) { 1105 parse_events_error__handle(err, term->err_val, 1106 strdup("too big"), 1107 NULL); 1108 return -EINVAL; 1109 } 1110 break; 1111 default: 1112 parse_events_error__handle(err, term->err_term, 1113 strdup("unknown term"), 1114 parse_events_formats_error_string(NULL)); 1115 return -EINVAL; 1116 } 1117 1118 /* 1119 * Check term availability after basic checking so 1120 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered. 1121 * 1122 * If check availability at the entry of this function, 1123 * user will see "'<sysfs term>' is not usable in 'perf stat'" 1124 * if an invalid config term is provided for legacy events 1125 * (for example, instructions/badterm/...), which is confusing. 1126 */ 1127 if (!config_term_avail(term->type_term, err)) 1128 return -EINVAL; 1129 return 0; 1130 #undef CHECK_TYPE_VAL 1131 } 1132 1133 static int config_term_pmu(struct perf_event_attr *attr, 1134 struct parse_events_term *term, 1135 struct parse_events_error *err) 1136 { 1137 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER || 1138 term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) 1139 /* 1140 * Always succeed for sysfs terms, as we dont know 1141 * at this point what type they need to have. 1142 */ 1143 return 0; 1144 else 1145 return config_term_common(attr, term, err); 1146 } 1147 1148 #ifdef HAVE_LIBTRACEEVENT 1149 static int config_term_tracepoint(struct perf_event_attr *attr, 1150 struct parse_events_term *term, 1151 struct parse_events_error *err) 1152 { 1153 switch (term->type_term) { 1154 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1155 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1156 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1157 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1158 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1159 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1160 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1161 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1162 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1163 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1164 return config_term_common(attr, term, err); 1165 default: 1166 if (err) { 1167 parse_events_error__handle(err, term->err_term, 1168 strdup("unknown term"), 1169 strdup("valid terms: call-graph,stack-size\n")); 1170 } 1171 return -EINVAL; 1172 } 1173 1174 return 0; 1175 } 1176 #endif 1177 1178 static int config_attr(struct perf_event_attr *attr, 1179 struct list_head *head, 1180 struct parse_events_error *err, 1181 config_term_func_t config_term) 1182 { 1183 struct parse_events_term *term; 1184 1185 list_for_each_entry(term, head, list) 1186 if (config_term(attr, term, err)) 1187 return -EINVAL; 1188 1189 return 0; 1190 } 1191 1192 static int get_config_terms(struct list_head *head_config, 1193 struct list_head *head_terms __maybe_unused) 1194 { 1195 #define ADD_CONFIG_TERM(__type, __weak) \ 1196 struct evsel_config_term *__t; \ 1197 \ 1198 __t = zalloc(sizeof(*__t)); \ 1199 if (!__t) \ 1200 return -ENOMEM; \ 1201 \ 1202 INIT_LIST_HEAD(&__t->list); \ 1203 __t->type = EVSEL__CONFIG_TERM_ ## __type; \ 1204 __t->weak = __weak; \ 1205 list_add_tail(&__t->list, head_terms) 1206 1207 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \ 1208 do { \ 1209 ADD_CONFIG_TERM(__type, __weak); \ 1210 __t->val.__name = __val; \ 1211 } while (0) 1212 1213 #define ADD_CONFIG_TERM_STR(__type, __val, __weak) \ 1214 do { \ 1215 ADD_CONFIG_TERM(__type, __weak); \ 1216 __t->val.str = strdup(__val); \ 1217 if (!__t->val.str) { \ 1218 zfree(&__t); \ 1219 return -ENOMEM; \ 1220 } \ 1221 __t->free_str = true; \ 1222 } while (0) 1223 1224 struct parse_events_term *term; 1225 1226 list_for_each_entry(term, head_config, list) { 1227 switch (term->type_term) { 1228 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 1229 ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak); 1230 break; 1231 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: 1232 ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak); 1233 break; 1234 case PARSE_EVENTS__TERM_TYPE_TIME: 1235 ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak); 1236 break; 1237 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: 1238 ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak); 1239 break; 1240 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 1241 ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak); 1242 break; 1243 case PARSE_EVENTS__TERM_TYPE_STACKSIZE: 1244 ADD_CONFIG_TERM_VAL(STACK_USER, stack_user, 1245 term->val.num, term->weak); 1246 break; 1247 case PARSE_EVENTS__TERM_TYPE_INHERIT: 1248 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1249 term->val.num ? 1 : 0, term->weak); 1250 break; 1251 case PARSE_EVENTS__TERM_TYPE_NOINHERIT: 1252 ADD_CONFIG_TERM_VAL(INHERIT, inherit, 1253 term->val.num ? 0 : 1, term->weak); 1254 break; 1255 case PARSE_EVENTS__TERM_TYPE_MAX_STACK: 1256 ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack, 1257 term->val.num, term->weak); 1258 break; 1259 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: 1260 ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events, 1261 term->val.num, term->weak); 1262 break; 1263 case PARSE_EVENTS__TERM_TYPE_OVERWRITE: 1264 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1265 term->val.num ? 1 : 0, term->weak); 1266 break; 1267 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: 1268 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite, 1269 term->val.num ? 0 : 1, term->weak); 1270 break; 1271 case PARSE_EVENTS__TERM_TYPE_DRV_CFG: 1272 ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak); 1273 break; 1274 case PARSE_EVENTS__TERM_TYPE_PERCORE: 1275 ADD_CONFIG_TERM_VAL(PERCORE, percore, 1276 term->val.num ? true : false, term->weak); 1277 break; 1278 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT: 1279 ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output, 1280 term->val.num ? 1 : 0, term->weak); 1281 break; 1282 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE: 1283 ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size, 1284 term->val.num, term->weak); 1285 break; 1286 default: 1287 break; 1288 } 1289 } 1290 return 0; 1291 } 1292 1293 /* 1294 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for 1295 * each bit of attr->config that the user has changed. 1296 */ 1297 static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config, 1298 struct list_head *head_terms) 1299 { 1300 struct parse_events_term *term; 1301 u64 bits = 0; 1302 int type; 1303 1304 list_for_each_entry(term, head_config, list) { 1305 switch (term->type_term) { 1306 case PARSE_EVENTS__TERM_TYPE_USER: 1307 type = perf_pmu__format_type(&pmu->format, term->config); 1308 if (type != PERF_PMU_FORMAT_VALUE_CONFIG) 1309 continue; 1310 bits |= perf_pmu__format_bits(&pmu->format, term->config); 1311 break; 1312 case PARSE_EVENTS__TERM_TYPE_CONFIG: 1313 bits = ~(u64)0; 1314 break; 1315 default: 1316 break; 1317 } 1318 } 1319 1320 if (bits) 1321 ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false); 1322 1323 #undef ADD_CONFIG_TERM 1324 return 0; 1325 } 1326 1327 int parse_events_add_tracepoint(struct list_head *list, int *idx, 1328 const char *sys, const char *event, 1329 struct parse_events_error *err, 1330 struct list_head *head_config) 1331 { 1332 #ifdef HAVE_LIBTRACEEVENT 1333 if (head_config) { 1334 struct perf_event_attr attr; 1335 1336 if (config_attr(&attr, head_config, err, 1337 config_term_tracepoint)) 1338 return -EINVAL; 1339 } 1340 1341 if (strpbrk(sys, "*?")) 1342 return add_tracepoint_multi_sys(list, idx, sys, event, 1343 err, head_config); 1344 else 1345 return add_tracepoint_event(list, idx, sys, event, 1346 err, head_config); 1347 #else 1348 (void)list; 1349 (void)idx; 1350 (void)sys; 1351 (void)event; 1352 (void)head_config; 1353 parse_events_error__handle(err, 0, strdup("unsupported tracepoint"), 1354 strdup("libtraceevent is necessary for tracepoint support")); 1355 return -1; 1356 #endif 1357 } 1358 1359 int parse_events_add_numeric(struct parse_events_state *parse_state, 1360 struct list_head *list, 1361 u32 type, u64 config, 1362 struct list_head *head_config) 1363 { 1364 struct perf_event_attr attr; 1365 LIST_HEAD(config_terms); 1366 const char *name, *metric_id; 1367 bool hybrid; 1368 int ret; 1369 1370 memset(&attr, 0, sizeof(attr)); 1371 attr.type = type; 1372 attr.config = config; 1373 1374 if (head_config) { 1375 if (config_attr(&attr, head_config, parse_state->error, 1376 config_term_common)) 1377 return -EINVAL; 1378 1379 if (get_config_terms(head_config, &config_terms)) 1380 return -ENOMEM; 1381 } 1382 1383 name = get_config_name(head_config); 1384 metric_id = get_config_metric_id(head_config); 1385 ret = parse_events__add_numeric_hybrid(parse_state, list, &attr, 1386 name, metric_id, 1387 &config_terms, &hybrid); 1388 if (hybrid) 1389 goto out_free_terms; 1390 1391 ret = add_event(list, &parse_state->idx, &attr, name, metric_id, 1392 &config_terms); 1393 out_free_terms: 1394 free_config_terms(&config_terms); 1395 return ret; 1396 } 1397 1398 int parse_events_add_tool(struct parse_events_state *parse_state, 1399 struct list_head *list, 1400 int tool_event) 1401 { 1402 return add_event_tool(list, &parse_state->idx, tool_event); 1403 } 1404 1405 static bool config_term_percore(struct list_head *config_terms) 1406 { 1407 struct evsel_config_term *term; 1408 1409 list_for_each_entry(term, config_terms, list) { 1410 if (term->type == EVSEL__CONFIG_TERM_PERCORE) 1411 return term->val.percore; 1412 } 1413 1414 return false; 1415 } 1416 1417 static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state, 1418 struct list_head *list, char *name, 1419 struct list_head *head_config) 1420 { 1421 struct parse_events_term *term; 1422 int ret = -1; 1423 1424 if (parse_state->fake_pmu || !head_config || list_empty(head_config) || 1425 !perf_pmu__is_hybrid(name)) { 1426 return -1; 1427 } 1428 1429 /* 1430 * More than one term in list. 1431 */ 1432 if (head_config->next && head_config->next->next != head_config) 1433 return -1; 1434 1435 term = list_first_entry(head_config, struct parse_events_term, list); 1436 if (term && term->config && strcmp(term->config, "event")) { 1437 ret = parse_events__with_hybrid_pmu(parse_state, term->config, 1438 name, list); 1439 } 1440 1441 return ret; 1442 } 1443 1444 int parse_events_add_pmu(struct parse_events_state *parse_state, 1445 struct list_head *list, char *name, 1446 struct list_head *head_config, 1447 bool auto_merge_stats, 1448 bool use_alias) 1449 { 1450 struct perf_event_attr attr; 1451 struct perf_pmu_info info; 1452 struct perf_pmu *pmu; 1453 struct evsel *evsel; 1454 struct parse_events_error *err = parse_state->error; 1455 bool use_uncore_alias; 1456 LIST_HEAD(config_terms); 1457 1458 pmu = parse_state->fake_pmu ?: perf_pmu__find(name); 1459 1460 if (verbose > 1 && !(pmu && pmu->selectable)) { 1461 fprintf(stderr, "Attempting to add event pmu '%s' with '", 1462 name); 1463 if (head_config) { 1464 struct parse_events_term *term; 1465 1466 list_for_each_entry(term, head_config, list) { 1467 fprintf(stderr, "%s,", term->config); 1468 } 1469 } 1470 fprintf(stderr, "' that may result in non-fatal errors\n"); 1471 } 1472 1473 if (!pmu) { 1474 char *err_str; 1475 1476 if (asprintf(&err_str, 1477 "Cannot find PMU `%s'. Missing kernel support?", 1478 name) >= 0) 1479 parse_events_error__handle(err, 0, err_str, NULL); 1480 return -EINVAL; 1481 } 1482 1483 if (pmu->default_config) { 1484 memcpy(&attr, pmu->default_config, 1485 sizeof(struct perf_event_attr)); 1486 } else { 1487 memset(&attr, 0, sizeof(attr)); 1488 } 1489 1490 use_uncore_alias = (pmu->is_uncore && use_alias); 1491 1492 if (!head_config) { 1493 attr.type = pmu->type; 1494 evsel = __add_event(list, &parse_state->idx, &attr, 1495 /*init_attr=*/true, /*name=*/NULL, 1496 /*metric_id=*/NULL, pmu, 1497 /*config_terms=*/NULL, auto_merge_stats, 1498 /*cpu_list=*/NULL); 1499 if (evsel) { 1500 evsel->pmu_name = name ? strdup(name) : NULL; 1501 evsel->use_uncore_alias = use_uncore_alias; 1502 return 0; 1503 } else { 1504 return -ENOMEM; 1505 } 1506 } 1507 1508 if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info)) 1509 return -EINVAL; 1510 1511 if (verbose > 1) { 1512 fprintf(stderr, "After aliases, add event pmu '%s' with '", 1513 name); 1514 if (head_config) { 1515 struct parse_events_term *term; 1516 1517 list_for_each_entry(term, head_config, list) { 1518 fprintf(stderr, "%s,", term->config); 1519 } 1520 } 1521 fprintf(stderr, "' that may result in non-fatal errors\n"); 1522 } 1523 1524 /* 1525 * Configure hardcoded terms first, no need to check 1526 * return value when called with fail == 0 ;) 1527 */ 1528 if (config_attr(&attr, head_config, parse_state->error, config_term_pmu)) 1529 return -EINVAL; 1530 1531 if (get_config_terms(head_config, &config_terms)) 1532 return -ENOMEM; 1533 1534 /* 1535 * When using default config, record which bits of attr->config were 1536 * changed by the user. 1537 */ 1538 if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms)) 1539 return -ENOMEM; 1540 1541 if (!parse_events__inside_hybrid_pmu(parse_state, list, name, 1542 head_config)) { 1543 return 0; 1544 } 1545 1546 if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) { 1547 free_config_terms(&config_terms); 1548 return -EINVAL; 1549 } 1550 1551 evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true, 1552 get_config_name(head_config), 1553 get_config_metric_id(head_config), pmu, 1554 &config_terms, auto_merge_stats, /*cpu_list=*/NULL); 1555 if (!evsel) 1556 return -ENOMEM; 1557 1558 if (evsel->name) 1559 evsel->use_config_name = true; 1560 1561 evsel->pmu_name = name ? strdup(name) : NULL; 1562 evsel->use_uncore_alias = use_uncore_alias; 1563 evsel->percore = config_term_percore(&evsel->config_terms); 1564 1565 if (parse_state->fake_pmu) 1566 return 0; 1567 1568 free((char *)evsel->unit); 1569 evsel->unit = strdup(info.unit); 1570 evsel->scale = info.scale; 1571 evsel->per_pkg = info.per_pkg; 1572 evsel->snapshot = info.snapshot; 1573 return 0; 1574 } 1575 1576 int parse_events_multi_pmu_add(struct parse_events_state *parse_state, 1577 char *str, struct list_head *head, 1578 struct list_head **listp) 1579 { 1580 struct parse_events_term *term; 1581 struct list_head *list = NULL; 1582 struct list_head *orig_head = NULL; 1583 struct perf_pmu *pmu = NULL; 1584 int ok = 0; 1585 char *config; 1586 1587 *listp = NULL; 1588 1589 if (!head) { 1590 head = malloc(sizeof(struct list_head)); 1591 if (!head) 1592 goto out_err; 1593 1594 INIT_LIST_HEAD(head); 1595 } 1596 config = strdup(str); 1597 if (!config) 1598 goto out_err; 1599 1600 if (parse_events_term__num(&term, 1601 PARSE_EVENTS__TERM_TYPE_USER, 1602 config, 1, false, &config, 1603 NULL) < 0) { 1604 free(config); 1605 goto out_err; 1606 } 1607 list_add_tail(&term->list, head); 1608 1609 /* Add it for all PMUs that support the alias */ 1610 list = malloc(sizeof(struct list_head)); 1611 if (!list) 1612 goto out_err; 1613 1614 INIT_LIST_HEAD(list); 1615 1616 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 1617 struct perf_pmu_alias *alias; 1618 1619 list_for_each_entry(alias, &pmu->aliases, list) { 1620 if (!strcasecmp(alias->name, str)) { 1621 parse_events_copy_term_list(head, &orig_head); 1622 if (!parse_events_add_pmu(parse_state, list, 1623 pmu->name, orig_head, 1624 true, true)) { 1625 pr_debug("%s -> %s/%s/\n", str, 1626 pmu->name, alias->str); 1627 ok++; 1628 } 1629 parse_events_terms__delete(orig_head); 1630 } 1631 } 1632 } 1633 1634 if (parse_state->fake_pmu) { 1635 if (!parse_events_add_pmu(parse_state, list, str, head, 1636 true, true)) { 1637 pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str); 1638 ok++; 1639 } 1640 } 1641 1642 out_err: 1643 if (ok) 1644 *listp = list; 1645 else 1646 free(list); 1647 1648 parse_events_terms__delete(head); 1649 return ok ? 0 : -1; 1650 } 1651 1652 int parse_events__modifier_group(struct list_head *list, 1653 char *event_mod) 1654 { 1655 return parse_events__modifier_event(list, event_mod, true); 1656 } 1657 1658 /* 1659 * Check if the two uncore PMUs are from the same uncore block 1660 * The format of the uncore PMU name is uncore_#blockname_#pmuidx 1661 */ 1662 static bool is_same_uncore_block(const char *pmu_name_a, const char *pmu_name_b) 1663 { 1664 char *end_a, *end_b; 1665 1666 end_a = strrchr(pmu_name_a, '_'); 1667 end_b = strrchr(pmu_name_b, '_'); 1668 1669 if (!end_a || !end_b) 1670 return false; 1671 1672 if ((end_a - pmu_name_a) != (end_b - pmu_name_b)) 1673 return false; 1674 1675 return (strncmp(pmu_name_a, pmu_name_b, end_a - pmu_name_a) == 0); 1676 } 1677 1678 static int 1679 parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list, 1680 struct parse_events_state *parse_state) 1681 { 1682 struct evsel *evsel, *leader; 1683 uintptr_t *leaders; 1684 bool is_leader = true; 1685 int i, nr_pmu = 0, total_members, ret = 0; 1686 1687 leader = list_first_entry(list, struct evsel, core.node); 1688 evsel = list_last_entry(list, struct evsel, core.node); 1689 total_members = evsel->core.idx - leader->core.idx + 1; 1690 1691 leaders = calloc(total_members, sizeof(uintptr_t)); 1692 if (WARN_ON(!leaders)) 1693 return 0; 1694 1695 /* 1696 * Going through the whole group and doing sanity check. 1697 * All members must use alias, and be from the same uncore block. 1698 * Also, storing the leader events in an array. 1699 */ 1700 __evlist__for_each_entry(list, evsel) { 1701 1702 /* Only split the uncore group which members use alias */ 1703 if (!evsel->use_uncore_alias) 1704 goto out; 1705 1706 /* The events must be from the same uncore block */ 1707 if (!is_same_uncore_block(leader->pmu_name, evsel->pmu_name)) 1708 goto out; 1709 1710 if (!is_leader) 1711 continue; 1712 /* 1713 * If the event's PMU name starts to repeat, it must be a new 1714 * event. That can be used to distinguish the leader from 1715 * other members, even they have the same event name. 1716 */ 1717 if ((leader != evsel) && 1718 !strcmp(leader->pmu_name, evsel->pmu_name)) { 1719 is_leader = false; 1720 continue; 1721 } 1722 1723 /* Store the leader event for each PMU */ 1724 leaders[nr_pmu++] = (uintptr_t) evsel; 1725 } 1726 1727 /* only one event alias */ 1728 if (nr_pmu == total_members) { 1729 parse_state->nr_groups--; 1730 goto handled; 1731 } 1732 1733 /* 1734 * An uncore event alias is a joint name which means the same event 1735 * runs on all PMUs of a block. 1736 * Perf doesn't support mixed events from different PMUs in the same 1737 * group. The big group has to be split into multiple small groups 1738 * which only include the events from the same PMU. 1739 * 1740 * Here the uncore event aliases must be from the same uncore block. 1741 * The number of PMUs must be same for each alias. The number of new 1742 * small groups equals to the number of PMUs. 1743 * Setting the leader event for corresponding members in each group. 1744 */ 1745 i = 0; 1746 __evlist__for_each_entry(list, evsel) { 1747 if (i >= nr_pmu) 1748 i = 0; 1749 evsel__set_leader(evsel, (struct evsel *) leaders[i++]); 1750 } 1751 1752 /* The number of members and group name are same for each group */ 1753 for (i = 0; i < nr_pmu; i++) { 1754 evsel = (struct evsel *) leaders[i]; 1755 evsel->core.nr_members = total_members / nr_pmu; 1756 evsel->group_name = name ? strdup(name) : NULL; 1757 } 1758 1759 /* Take the new small groups into account */ 1760 parse_state->nr_groups += nr_pmu - 1; 1761 1762 handled: 1763 ret = 1; 1764 out: 1765 free(leaders); 1766 return ret; 1767 } 1768 1769 __weak struct evsel *arch_evlist__leader(struct list_head *list) 1770 { 1771 return list_first_entry(list, struct evsel, core.node); 1772 } 1773 1774 void parse_events__set_leader(char *name, struct list_head *list, 1775 struct parse_events_state *parse_state) 1776 { 1777 struct evsel *leader; 1778 1779 if (list_empty(list)) { 1780 WARN_ONCE(true, "WARNING: failed to set leader: empty list"); 1781 return; 1782 } 1783 1784 if (parse_events__set_leader_for_uncore_aliase(name, list, parse_state)) 1785 return; 1786 1787 leader = arch_evlist__leader(list); 1788 __perf_evlist__set_leader(list, &leader->core); 1789 leader->group_name = name ? strdup(name) : NULL; 1790 list_move(&leader->core.node, list); 1791 } 1792 1793 /* list_event is assumed to point to malloc'ed memory */ 1794 void parse_events_update_lists(struct list_head *list_event, 1795 struct list_head *list_all) 1796 { 1797 /* 1798 * Called for single event definition. Update the 1799 * 'all event' list, and reinit the 'single event' 1800 * list, for next event definition. 1801 */ 1802 list_splice_tail(list_event, list_all); 1803 free(list_event); 1804 } 1805 1806 struct event_modifier { 1807 int eu; 1808 int ek; 1809 int eh; 1810 int eH; 1811 int eG; 1812 int eI; 1813 int precise; 1814 int precise_max; 1815 int exclude_GH; 1816 int sample_read; 1817 int pinned; 1818 int weak; 1819 int exclusive; 1820 int bpf_counter; 1821 }; 1822 1823 static int get_event_modifier(struct event_modifier *mod, char *str, 1824 struct evsel *evsel) 1825 { 1826 int eu = evsel ? evsel->core.attr.exclude_user : 0; 1827 int ek = evsel ? evsel->core.attr.exclude_kernel : 0; 1828 int eh = evsel ? evsel->core.attr.exclude_hv : 0; 1829 int eH = evsel ? evsel->core.attr.exclude_host : 0; 1830 int eG = evsel ? evsel->core.attr.exclude_guest : 0; 1831 int eI = evsel ? evsel->core.attr.exclude_idle : 0; 1832 int precise = evsel ? evsel->core.attr.precise_ip : 0; 1833 int precise_max = 0; 1834 int sample_read = 0; 1835 int pinned = evsel ? evsel->core.attr.pinned : 0; 1836 int exclusive = evsel ? evsel->core.attr.exclusive : 0; 1837 1838 int exclude = eu | ek | eh; 1839 int exclude_GH = evsel ? evsel->exclude_GH : 0; 1840 int weak = 0; 1841 int bpf_counter = 0; 1842 1843 memset(mod, 0, sizeof(*mod)); 1844 1845 while (*str) { 1846 if (*str == 'u') { 1847 if (!exclude) 1848 exclude = eu = ek = eh = 1; 1849 if (!exclude_GH && !perf_guest) 1850 eG = 1; 1851 eu = 0; 1852 } else if (*str == 'k') { 1853 if (!exclude) 1854 exclude = eu = ek = eh = 1; 1855 ek = 0; 1856 } else if (*str == 'h') { 1857 if (!exclude) 1858 exclude = eu = ek = eh = 1; 1859 eh = 0; 1860 } else if (*str == 'G') { 1861 if (!exclude_GH) 1862 exclude_GH = eG = eH = 1; 1863 eG = 0; 1864 } else if (*str == 'H') { 1865 if (!exclude_GH) 1866 exclude_GH = eG = eH = 1; 1867 eH = 0; 1868 } else if (*str == 'I') { 1869 eI = 1; 1870 } else if (*str == 'p') { 1871 precise++; 1872 /* use of precise requires exclude_guest */ 1873 if (!exclude_GH) 1874 eG = 1; 1875 } else if (*str == 'P') { 1876 precise_max = 1; 1877 } else if (*str == 'S') { 1878 sample_read = 1; 1879 } else if (*str == 'D') { 1880 pinned = 1; 1881 } else if (*str == 'e') { 1882 exclusive = 1; 1883 } else if (*str == 'W') { 1884 weak = 1; 1885 } else if (*str == 'b') { 1886 bpf_counter = 1; 1887 } else 1888 break; 1889 1890 ++str; 1891 } 1892 1893 /* 1894 * precise ip: 1895 * 1896 * 0 - SAMPLE_IP can have arbitrary skid 1897 * 1 - SAMPLE_IP must have constant skid 1898 * 2 - SAMPLE_IP requested to have 0 skid 1899 * 3 - SAMPLE_IP must have 0 skid 1900 * 1901 * See also PERF_RECORD_MISC_EXACT_IP 1902 */ 1903 if (precise > 3) 1904 return -EINVAL; 1905 1906 mod->eu = eu; 1907 mod->ek = ek; 1908 mod->eh = eh; 1909 mod->eH = eH; 1910 mod->eG = eG; 1911 mod->eI = eI; 1912 mod->precise = precise; 1913 mod->precise_max = precise_max; 1914 mod->exclude_GH = exclude_GH; 1915 mod->sample_read = sample_read; 1916 mod->pinned = pinned; 1917 mod->weak = weak; 1918 mod->bpf_counter = bpf_counter; 1919 mod->exclusive = exclusive; 1920 1921 return 0; 1922 } 1923 1924 /* 1925 * Basic modifier sanity check to validate it contains only one 1926 * instance of any modifier (apart from 'p') present. 1927 */ 1928 static int check_modifier(char *str) 1929 { 1930 char *p = str; 1931 1932 /* The sizeof includes 0 byte as well. */ 1933 if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1)) 1934 return -1; 1935 1936 while (*p) { 1937 if (*p != 'p' && strchr(p + 1, *p)) 1938 return -1; 1939 p++; 1940 } 1941 1942 return 0; 1943 } 1944 1945 int parse_events__modifier_event(struct list_head *list, char *str, bool add) 1946 { 1947 struct evsel *evsel; 1948 struct event_modifier mod; 1949 1950 if (str == NULL) 1951 return 0; 1952 1953 if (check_modifier(str)) 1954 return -EINVAL; 1955 1956 if (!add && get_event_modifier(&mod, str, NULL)) 1957 return -EINVAL; 1958 1959 __evlist__for_each_entry(list, evsel) { 1960 if (add && get_event_modifier(&mod, str, evsel)) 1961 return -EINVAL; 1962 1963 evsel->core.attr.exclude_user = mod.eu; 1964 evsel->core.attr.exclude_kernel = mod.ek; 1965 evsel->core.attr.exclude_hv = mod.eh; 1966 evsel->core.attr.precise_ip = mod.precise; 1967 evsel->core.attr.exclude_host = mod.eH; 1968 evsel->core.attr.exclude_guest = mod.eG; 1969 evsel->core.attr.exclude_idle = mod.eI; 1970 evsel->exclude_GH = mod.exclude_GH; 1971 evsel->sample_read = mod.sample_read; 1972 evsel->precise_max = mod.precise_max; 1973 evsel->weak_group = mod.weak; 1974 evsel->bpf_counter = mod.bpf_counter; 1975 1976 if (evsel__is_group_leader(evsel)) { 1977 evsel->core.attr.pinned = mod.pinned; 1978 evsel->core.attr.exclusive = mod.exclusive; 1979 } 1980 } 1981 1982 return 0; 1983 } 1984 1985 int parse_events_name(struct list_head *list, const char *name) 1986 { 1987 struct evsel *evsel; 1988 1989 __evlist__for_each_entry(list, evsel) { 1990 if (!evsel->name) 1991 evsel->name = strdup(name); 1992 } 1993 1994 return 0; 1995 } 1996 1997 static int 1998 comp_pmu(const void *p1, const void *p2) 1999 { 2000 struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1; 2001 struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2; 2002 2003 return strcasecmp(pmu1->symbol, pmu2->symbol); 2004 } 2005 2006 static void perf_pmu__parse_cleanup(void) 2007 { 2008 if (perf_pmu_events_list_num > 0) { 2009 struct perf_pmu_event_symbol *p; 2010 int i; 2011 2012 for (i = 0; i < perf_pmu_events_list_num; i++) { 2013 p = perf_pmu_events_list + i; 2014 zfree(&p->symbol); 2015 } 2016 zfree(&perf_pmu_events_list); 2017 perf_pmu_events_list_num = 0; 2018 } 2019 } 2020 2021 #define SET_SYMBOL(str, stype) \ 2022 do { \ 2023 p->symbol = str; \ 2024 if (!p->symbol) \ 2025 goto err; \ 2026 p->type = stype; \ 2027 } while (0) 2028 2029 /* 2030 * Read the pmu events list from sysfs 2031 * Save it into perf_pmu_events_list 2032 */ 2033 static void perf_pmu__parse_init(void) 2034 { 2035 2036 struct perf_pmu *pmu = NULL; 2037 struct perf_pmu_alias *alias; 2038 int len = 0; 2039 2040 pmu = NULL; 2041 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 2042 list_for_each_entry(alias, &pmu->aliases, list) { 2043 char *tmp = strchr(alias->name, '-'); 2044 2045 if (tmp) { 2046 char *tmp2 = NULL; 2047 2048 tmp2 = strchr(tmp + 1, '-'); 2049 len++; 2050 if (tmp2) 2051 len++; 2052 } 2053 2054 len++; 2055 } 2056 } 2057 2058 if (len == 0) { 2059 perf_pmu_events_list_num = -1; 2060 return; 2061 } 2062 perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len); 2063 if (!perf_pmu_events_list) 2064 return; 2065 perf_pmu_events_list_num = len; 2066 2067 len = 0; 2068 pmu = NULL; 2069 while ((pmu = perf_pmu__scan(pmu)) != NULL) { 2070 list_for_each_entry(alias, &pmu->aliases, list) { 2071 struct perf_pmu_event_symbol *p = perf_pmu_events_list + len; 2072 char *tmp = strchr(alias->name, '-'); 2073 char *tmp2 = NULL; 2074 2075 if (tmp) 2076 tmp2 = strchr(tmp + 1, '-'); 2077 if (tmp2) { 2078 SET_SYMBOL(strndup(alias->name, tmp - alias->name), 2079 PMU_EVENT_SYMBOL_PREFIX); 2080 p++; 2081 tmp++; 2082 SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX); 2083 p++; 2084 SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2); 2085 len += 3; 2086 } else if (tmp) { 2087 SET_SYMBOL(strndup(alias->name, tmp - alias->name), 2088 PMU_EVENT_SYMBOL_PREFIX); 2089 p++; 2090 SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX); 2091 len += 2; 2092 } else { 2093 SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL); 2094 len++; 2095 } 2096 } 2097 } 2098 qsort(perf_pmu_events_list, len, 2099 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2100 2101 return; 2102 err: 2103 perf_pmu__parse_cleanup(); 2104 } 2105 2106 /* 2107 * This function injects special term in 2108 * perf_pmu_events_list so the test code 2109 * can check on this functionality. 2110 */ 2111 int perf_pmu__test_parse_init(void) 2112 { 2113 struct perf_pmu_event_symbol *list, *tmp, symbols[] = { 2114 {(char *)"read", PMU_EVENT_SYMBOL}, 2115 {(char *)"event", PMU_EVENT_SYMBOL_PREFIX}, 2116 {(char *)"two", PMU_EVENT_SYMBOL_SUFFIX}, 2117 {(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX}, 2118 {(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2}, 2119 }; 2120 unsigned long i, j; 2121 2122 tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols)); 2123 if (!list) 2124 return -ENOMEM; 2125 2126 for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) { 2127 tmp->type = symbols[i].type; 2128 tmp->symbol = strdup(symbols[i].symbol); 2129 if (!tmp->symbol) 2130 goto err_free; 2131 } 2132 2133 perf_pmu_events_list = list; 2134 perf_pmu_events_list_num = ARRAY_SIZE(symbols); 2135 2136 qsort(perf_pmu_events_list, ARRAY_SIZE(symbols), 2137 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2138 return 0; 2139 2140 err_free: 2141 for (j = 0, tmp = list; j < i; j++, tmp++) 2142 free(tmp->symbol); 2143 free(list); 2144 return -ENOMEM; 2145 } 2146 2147 enum perf_pmu_event_symbol_type 2148 perf_pmu__parse_check(const char *name) 2149 { 2150 struct perf_pmu_event_symbol p, *r; 2151 2152 /* scan kernel pmu events from sysfs if needed */ 2153 if (perf_pmu_events_list_num == 0) 2154 perf_pmu__parse_init(); 2155 /* 2156 * name "cpu" could be prefix of cpu-cycles or cpu// events. 2157 * cpu-cycles has been handled by hardcode. 2158 * So it must be cpu// events, not kernel pmu event. 2159 */ 2160 if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu")) 2161 return PMU_EVENT_SYMBOL_ERR; 2162 2163 p.symbol = strdup(name); 2164 r = bsearch(&p, perf_pmu_events_list, 2165 (size_t) perf_pmu_events_list_num, 2166 sizeof(struct perf_pmu_event_symbol), comp_pmu); 2167 zfree(&p.symbol); 2168 return r ? r->type : PMU_EVENT_SYMBOL_ERR; 2169 } 2170 2171 static int parse_events__scanner(const char *str, 2172 struct parse_events_state *parse_state) 2173 { 2174 YY_BUFFER_STATE buffer; 2175 void *scanner; 2176 int ret; 2177 2178 ret = parse_events_lex_init_extra(parse_state, &scanner); 2179 if (ret) 2180 return ret; 2181 2182 buffer = parse_events__scan_string(str, scanner); 2183 2184 #ifdef PARSER_DEBUG 2185 parse_events_debug = 1; 2186 parse_events_set_debug(1, scanner); 2187 #endif 2188 ret = parse_events_parse(parse_state, scanner); 2189 2190 parse_events__flush_buffer(buffer, scanner); 2191 parse_events__delete_buffer(buffer, scanner); 2192 parse_events_lex_destroy(scanner); 2193 return ret; 2194 } 2195 2196 /* 2197 * parse event config string, return a list of event terms. 2198 */ 2199 int parse_events_terms(struct list_head *terms, const char *str) 2200 { 2201 struct parse_events_state parse_state = { 2202 .terms = NULL, 2203 .stoken = PE_START_TERMS, 2204 }; 2205 int ret; 2206 2207 ret = parse_events__scanner(str, &parse_state); 2208 perf_pmu__parse_cleanup(); 2209 2210 if (!ret) { 2211 list_splice(parse_state.terms, terms); 2212 zfree(&parse_state.terms); 2213 return 0; 2214 } 2215 2216 parse_events_terms__delete(parse_state.terms); 2217 return ret; 2218 } 2219 2220 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state, 2221 const char *str, char *pmu_name, 2222 struct list_head *list) 2223 { 2224 struct parse_events_state ps = { 2225 .list = LIST_HEAD_INIT(ps.list), 2226 .stoken = PE_START_EVENTS, 2227 .hybrid_pmu_name = pmu_name, 2228 .idx = parse_state->idx, 2229 }; 2230 int ret; 2231 2232 ret = parse_events__scanner(str, &ps); 2233 perf_pmu__parse_cleanup(); 2234 2235 if (!ret) { 2236 if (!list_empty(&ps.list)) { 2237 list_splice(&ps.list, list); 2238 parse_state->idx = ps.idx; 2239 return 0; 2240 } else 2241 return -1; 2242 } 2243 2244 return ret; 2245 } 2246 2247 int __parse_events(struct evlist *evlist, const char *str, 2248 struct parse_events_error *err, struct perf_pmu *fake_pmu) 2249 { 2250 struct parse_events_state parse_state = { 2251 .list = LIST_HEAD_INIT(parse_state.list), 2252 .idx = evlist->core.nr_entries, 2253 .error = err, 2254 .evlist = evlist, 2255 .stoken = PE_START_EVENTS, 2256 .fake_pmu = fake_pmu, 2257 }; 2258 int ret; 2259 2260 ret = parse_events__scanner(str, &parse_state); 2261 perf_pmu__parse_cleanup(); 2262 2263 if (!ret && list_empty(&parse_state.list)) { 2264 WARN_ONCE(true, "WARNING: event parser found nothing\n"); 2265 return -1; 2266 } 2267 2268 /* 2269 * Add list to the evlist even with errors to allow callers to clean up. 2270 */ 2271 evlist__splice_list_tail(evlist, &parse_state.list); 2272 2273 if (!ret) { 2274 struct evsel *last; 2275 2276 evlist->core.nr_groups += parse_state.nr_groups; 2277 last = evlist__last(evlist); 2278 last->cmdline_group_boundary = true; 2279 2280 return 0; 2281 } 2282 2283 /* 2284 * There are 2 users - builtin-record and builtin-test objects. 2285 * Both call evlist__delete in case of error, so we dont 2286 * need to bother. 2287 */ 2288 return ret; 2289 } 2290 2291 int parse_event(struct evlist *evlist, const char *str) 2292 { 2293 struct parse_events_error err; 2294 int ret; 2295 2296 parse_events_error__init(&err); 2297 ret = parse_events(evlist, str, &err); 2298 parse_events_error__exit(&err); 2299 return ret; 2300 } 2301 2302 void parse_events_error__init(struct parse_events_error *err) 2303 { 2304 bzero(err, sizeof(*err)); 2305 } 2306 2307 void parse_events_error__exit(struct parse_events_error *err) 2308 { 2309 zfree(&err->str); 2310 zfree(&err->help); 2311 zfree(&err->first_str); 2312 zfree(&err->first_help); 2313 } 2314 2315 void parse_events_error__handle(struct parse_events_error *err, int idx, 2316 char *str, char *help) 2317 { 2318 if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n")) 2319 goto out_free; 2320 switch (err->num_errors) { 2321 case 0: 2322 err->idx = idx; 2323 err->str = str; 2324 err->help = help; 2325 break; 2326 case 1: 2327 err->first_idx = err->idx; 2328 err->idx = idx; 2329 err->first_str = err->str; 2330 err->str = str; 2331 err->first_help = err->help; 2332 err->help = help; 2333 break; 2334 default: 2335 pr_debug("Multiple errors dropping message: %s (%s)\n", 2336 err->str, err->help); 2337 free(err->str); 2338 err->str = str; 2339 free(err->help); 2340 err->help = help; 2341 break; 2342 } 2343 err->num_errors++; 2344 return; 2345 2346 out_free: 2347 free(str); 2348 free(help); 2349 } 2350 2351 #define MAX_WIDTH 1000 2352 static int get_term_width(void) 2353 { 2354 struct winsize ws; 2355 2356 get_term_dimensions(&ws); 2357 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col; 2358 } 2359 2360 static void __parse_events_error__print(int err_idx, const char *err_str, 2361 const char *err_help, const char *event) 2362 { 2363 const char *str = "invalid or unsupported event: "; 2364 char _buf[MAX_WIDTH]; 2365 char *buf = (char *) event; 2366 int idx = 0; 2367 if (err_str) { 2368 /* -2 for extra '' in the final fprintf */ 2369 int width = get_term_width() - 2; 2370 int len_event = strlen(event); 2371 int len_str, max_len, cut = 0; 2372 2373 /* 2374 * Maximum error index indent, we will cut 2375 * the event string if it's bigger. 2376 */ 2377 int max_err_idx = 13; 2378 2379 /* 2380 * Let's be specific with the message when 2381 * we have the precise error. 2382 */ 2383 str = "event syntax error: "; 2384 len_str = strlen(str); 2385 max_len = width - len_str; 2386 2387 buf = _buf; 2388 2389 /* We're cutting from the beginning. */ 2390 if (err_idx > max_err_idx) 2391 cut = err_idx - max_err_idx; 2392 2393 strncpy(buf, event + cut, max_len); 2394 2395 /* Mark cut parts with '..' on both sides. */ 2396 if (cut) 2397 buf[0] = buf[1] = '.'; 2398 2399 if ((len_event - cut) > max_len) { 2400 buf[max_len - 1] = buf[max_len - 2] = '.'; 2401 buf[max_len] = 0; 2402 } 2403 2404 idx = len_str + err_idx - cut; 2405 } 2406 2407 fprintf(stderr, "%s'%s'\n", str, buf); 2408 if (idx) { 2409 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str); 2410 if (err_help) 2411 fprintf(stderr, "\n%s\n", err_help); 2412 } 2413 } 2414 2415 void parse_events_error__print(struct parse_events_error *err, 2416 const char *event) 2417 { 2418 if (!err->num_errors) 2419 return; 2420 2421 __parse_events_error__print(err->idx, err->str, err->help, event); 2422 2423 if (err->num_errors > 1) { 2424 fputs("\nInitial error:\n", stderr); 2425 __parse_events_error__print(err->first_idx, err->first_str, 2426 err->first_help, event); 2427 } 2428 } 2429 2430 #undef MAX_WIDTH 2431 2432 int parse_events_option(const struct option *opt, const char *str, 2433 int unset __maybe_unused) 2434 { 2435 struct evlist *evlist = *(struct evlist **)opt->value; 2436 struct parse_events_error err; 2437 int ret; 2438 2439 parse_events_error__init(&err); 2440 ret = parse_events(evlist, str, &err); 2441 2442 if (ret) { 2443 parse_events_error__print(&err, str); 2444 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 2445 } 2446 parse_events_error__exit(&err); 2447 2448 return ret; 2449 } 2450 2451 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset) 2452 { 2453 struct evlist **evlistp = opt->value; 2454 int ret; 2455 2456 if (*evlistp == NULL) { 2457 *evlistp = evlist__new(); 2458 2459 if (*evlistp == NULL) { 2460 fprintf(stderr, "Not enough memory to create evlist\n"); 2461 return -1; 2462 } 2463 } 2464 2465 ret = parse_events_option(opt, str, unset); 2466 if (ret) { 2467 evlist__delete(*evlistp); 2468 *evlistp = NULL; 2469 } 2470 2471 return ret; 2472 } 2473 2474 static int 2475 foreach_evsel_in_last_glob(struct evlist *evlist, 2476 int (*func)(struct evsel *evsel, 2477 const void *arg), 2478 const void *arg) 2479 { 2480 struct evsel *last = NULL; 2481 int err; 2482 2483 /* 2484 * Don't return when list_empty, give func a chance to report 2485 * error when it found last == NULL. 2486 * 2487 * So no need to WARN here, let *func do this. 2488 */ 2489 if (evlist->core.nr_entries > 0) 2490 last = evlist__last(evlist); 2491 2492 do { 2493 err = (*func)(last, arg); 2494 if (err) 2495 return -1; 2496 if (!last) 2497 return 0; 2498 2499 if (last->core.node.prev == &evlist->core.entries) 2500 return 0; 2501 last = list_entry(last->core.node.prev, struct evsel, core.node); 2502 } while (!last->cmdline_group_boundary); 2503 2504 return 0; 2505 } 2506 2507 static int set_filter(struct evsel *evsel, const void *arg) 2508 { 2509 const char *str = arg; 2510 bool found = false; 2511 int nr_addr_filters = 0; 2512 struct perf_pmu *pmu = NULL; 2513 2514 if (evsel == NULL) { 2515 fprintf(stderr, 2516 "--filter option should follow a -e tracepoint or HW tracer option\n"); 2517 return -1; 2518 } 2519 2520 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 2521 if (evsel__append_tp_filter(evsel, str) < 0) { 2522 fprintf(stderr, 2523 "not enough memory to hold filter string\n"); 2524 return -1; 2525 } 2526 2527 return 0; 2528 } 2529 2530 while ((pmu = perf_pmu__scan(pmu)) != NULL) 2531 if (pmu->type == evsel->core.attr.type) { 2532 found = true; 2533 break; 2534 } 2535 2536 if (found) 2537 perf_pmu__scan_file(pmu, "nr_addr_filters", 2538 "%d", &nr_addr_filters); 2539 2540 if (!nr_addr_filters) { 2541 fprintf(stderr, 2542 "This CPU does not support address filtering\n"); 2543 return -1; 2544 } 2545 2546 if (evsel__append_addr_filter(evsel, str) < 0) { 2547 fprintf(stderr, 2548 "not enough memory to hold filter string\n"); 2549 return -1; 2550 } 2551 2552 return 0; 2553 } 2554 2555 int parse_filter(const struct option *opt, const char *str, 2556 int unset __maybe_unused) 2557 { 2558 struct evlist *evlist = *(struct evlist **)opt->value; 2559 2560 return foreach_evsel_in_last_glob(evlist, set_filter, 2561 (const void *)str); 2562 } 2563 2564 static int add_exclude_perf_filter(struct evsel *evsel, 2565 const void *arg __maybe_unused) 2566 { 2567 char new_filter[64]; 2568 2569 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2570 fprintf(stderr, 2571 "--exclude-perf option should follow a -e tracepoint option\n"); 2572 return -1; 2573 } 2574 2575 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); 2576 2577 if (evsel__append_tp_filter(evsel, new_filter) < 0) { 2578 fprintf(stderr, 2579 "not enough memory to hold filter string\n"); 2580 return -1; 2581 } 2582 2583 return 0; 2584 } 2585 2586 int exclude_perf(const struct option *opt, 2587 const char *arg __maybe_unused, 2588 int unset __maybe_unused) 2589 { 2590 struct evlist *evlist = *(struct evlist **)opt->value; 2591 2592 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, 2593 NULL); 2594 } 2595 2596 int parse_events__is_hardcoded_term(struct parse_events_term *term) 2597 { 2598 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; 2599 } 2600 2601 static int new_term(struct parse_events_term **_term, 2602 struct parse_events_term *temp, 2603 char *str, u64 num) 2604 { 2605 struct parse_events_term *term; 2606 2607 term = malloc(sizeof(*term)); 2608 if (!term) 2609 return -ENOMEM; 2610 2611 *term = *temp; 2612 INIT_LIST_HEAD(&term->list); 2613 term->weak = false; 2614 2615 switch (term->type_val) { 2616 case PARSE_EVENTS__TERM_TYPE_NUM: 2617 term->val.num = num; 2618 break; 2619 case PARSE_EVENTS__TERM_TYPE_STR: 2620 term->val.str = str; 2621 break; 2622 default: 2623 free(term); 2624 return -EINVAL; 2625 } 2626 2627 *_term = term; 2628 return 0; 2629 } 2630 2631 int parse_events_term__num(struct parse_events_term **term, 2632 int type_term, char *config, u64 num, 2633 bool no_value, 2634 void *loc_term_, void *loc_val_) 2635 { 2636 YYLTYPE *loc_term = loc_term_; 2637 YYLTYPE *loc_val = loc_val_; 2638 2639 struct parse_events_term temp = { 2640 .type_val = PARSE_EVENTS__TERM_TYPE_NUM, 2641 .type_term = type_term, 2642 .config = config ? : strdup(config_term_names[type_term]), 2643 .no_value = no_value, 2644 .err_term = loc_term ? loc_term->first_column : 0, 2645 .err_val = loc_val ? loc_val->first_column : 0, 2646 }; 2647 2648 return new_term(term, &temp, NULL, num); 2649 } 2650 2651 int parse_events_term__str(struct parse_events_term **term, 2652 int type_term, char *config, char *str, 2653 void *loc_term_, void *loc_val_) 2654 { 2655 YYLTYPE *loc_term = loc_term_; 2656 YYLTYPE *loc_val = loc_val_; 2657 2658 struct parse_events_term temp = { 2659 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2660 .type_term = type_term, 2661 .config = config, 2662 .err_term = loc_term ? loc_term->first_column : 0, 2663 .err_val = loc_val ? loc_val->first_column : 0, 2664 }; 2665 2666 return new_term(term, &temp, str, 0); 2667 } 2668 2669 int parse_events_term__sym_hw(struct parse_events_term **term, 2670 char *config, unsigned idx) 2671 { 2672 struct event_symbol *sym; 2673 char *str; 2674 struct parse_events_term temp = { 2675 .type_val = PARSE_EVENTS__TERM_TYPE_STR, 2676 .type_term = PARSE_EVENTS__TERM_TYPE_USER, 2677 .config = config, 2678 }; 2679 2680 if (!temp.config) { 2681 temp.config = strdup("event"); 2682 if (!temp.config) 2683 return -ENOMEM; 2684 } 2685 BUG_ON(idx >= PERF_COUNT_HW_MAX); 2686 sym = &event_symbols_hw[idx]; 2687 2688 str = strdup(sym->symbol); 2689 if (!str) 2690 return -ENOMEM; 2691 return new_term(term, &temp, str, 0); 2692 } 2693 2694 int parse_events_term__clone(struct parse_events_term **new, 2695 struct parse_events_term *term) 2696 { 2697 char *str; 2698 struct parse_events_term temp = { 2699 .type_val = term->type_val, 2700 .type_term = term->type_term, 2701 .config = NULL, 2702 .err_term = term->err_term, 2703 .err_val = term->err_val, 2704 }; 2705 2706 if (term->config) { 2707 temp.config = strdup(term->config); 2708 if (!temp.config) 2709 return -ENOMEM; 2710 } 2711 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) 2712 return new_term(new, &temp, NULL, term->val.num); 2713 2714 str = strdup(term->val.str); 2715 if (!str) 2716 return -ENOMEM; 2717 return new_term(new, &temp, str, 0); 2718 } 2719 2720 void parse_events_term__delete(struct parse_events_term *term) 2721 { 2722 if (term->array.nr_ranges) 2723 zfree(&term->array.ranges); 2724 2725 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) 2726 zfree(&term->val.str); 2727 2728 zfree(&term->config); 2729 free(term); 2730 } 2731 2732 int parse_events_copy_term_list(struct list_head *old, 2733 struct list_head **new) 2734 { 2735 struct parse_events_term *term, *n; 2736 int ret; 2737 2738 if (!old) { 2739 *new = NULL; 2740 return 0; 2741 } 2742 2743 *new = malloc(sizeof(struct list_head)); 2744 if (!*new) 2745 return -ENOMEM; 2746 INIT_LIST_HEAD(*new); 2747 2748 list_for_each_entry (term, old, list) { 2749 ret = parse_events_term__clone(&n, term); 2750 if (ret) 2751 return ret; 2752 list_add_tail(&n->list, *new); 2753 } 2754 return 0; 2755 } 2756 2757 void parse_events_terms__purge(struct list_head *terms) 2758 { 2759 struct parse_events_term *term, *h; 2760 2761 list_for_each_entry_safe(term, h, terms, list) { 2762 list_del_init(&term->list); 2763 parse_events_term__delete(term); 2764 } 2765 } 2766 2767 void parse_events_terms__delete(struct list_head *terms) 2768 { 2769 if (!terms) 2770 return; 2771 parse_events_terms__purge(terms); 2772 free(terms); 2773 } 2774 2775 void parse_events__clear_array(struct parse_events_array *a) 2776 { 2777 zfree(&a->ranges); 2778 } 2779 2780 void parse_events_evlist_error(struct parse_events_state *parse_state, 2781 int idx, const char *str) 2782 { 2783 if (!parse_state->error) 2784 return; 2785 2786 parse_events_error__handle(parse_state->error, idx, strdup(str), NULL); 2787 } 2788 2789 static void config_terms_list(char *buf, size_t buf_sz) 2790 { 2791 int i; 2792 bool first = true; 2793 2794 buf[0] = '\0'; 2795 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) { 2796 const char *name = config_term_names[i]; 2797 2798 if (!config_term_avail(i, NULL)) 2799 continue; 2800 if (!name) 2801 continue; 2802 if (name[0] == '<') 2803 continue; 2804 2805 if (strlen(buf) + strlen(name) + 2 >= buf_sz) 2806 return; 2807 2808 if (!first) 2809 strcat(buf, ","); 2810 else 2811 first = false; 2812 strcat(buf, name); 2813 } 2814 } 2815 2816 /* 2817 * Return string contains valid config terms of an event. 2818 * @additional_terms: For terms such as PMU sysfs terms. 2819 */ 2820 char *parse_events_formats_error_string(char *additional_terms) 2821 { 2822 char *str; 2823 /* "no-overwrite" is the longest name */ 2824 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR * 2825 (sizeof("no-overwrite") - 1)]; 2826 2827 config_terms_list(static_terms, sizeof(static_terms)); 2828 /* valid terms */ 2829 if (additional_terms) { 2830 if (asprintf(&str, "valid terms: %s,%s", 2831 additional_terms, static_terms) < 0) 2832 goto fail; 2833 } else { 2834 if (asprintf(&str, "valid terms: %s", static_terms) < 0) 2835 goto fail; 2836 } 2837 return str; 2838 2839 fail: 2840 return NULL; 2841 } 2842 2843 struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx, 2844 struct perf_event_attr *attr, 2845 const char *name, 2846 const char *metric_id, 2847 struct perf_pmu *pmu, 2848 struct list_head *config_terms) 2849 { 2850 return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id, 2851 pmu, config_terms, /*auto_merge_stats=*/false, 2852 /*cpu_list=*/NULL); 2853 } 2854