1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-ftrace.c 4 * 5 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org> 6 * Copyright (c) 2020 Changbin Du <changbin.du@gmail.com>, significant enhancement. 7 */ 8 9 #include "builtin.h" 10 11 #include <errno.h> 12 #include <unistd.h> 13 #include <signal.h> 14 #include <stdlib.h> 15 #include <fcntl.h> 16 #include <inttypes.h> 17 #include <math.h> 18 #include <poll.h> 19 #include <ctype.h> 20 #include <linux/capability.h> 21 #include <linux/string.h> 22 23 #include "debug.h" 24 #include <subcmd/pager.h> 25 #include <subcmd/parse-options.h> 26 #include <api/io.h> 27 #include <api/fs/tracing_path.h> 28 #include "evlist.h" 29 #include "target.h" 30 #include "cpumap.h" 31 #include "hashmap.h" 32 #include "thread_map.h" 33 #include "strfilter.h" 34 #include "util/cap.h" 35 #include "util/config.h" 36 #include "util/ftrace.h" 37 #include "util/stat.h" 38 #include "util/units.h" 39 #include "util/parse-sublevel-options.h" 40 41 #define DEFAULT_TRACER "function_graph" 42 43 static volatile sig_atomic_t workload_exec_errno; 44 static volatile sig_atomic_t done; 45 46 static void sig_handler(int sig __maybe_unused) 47 { 48 done = true; 49 } 50 51 /* 52 * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since 53 * we asked by setting its exec_error to the function below, 54 * ftrace__workload_exec_failed_signal. 55 * 56 * XXX We need to handle this more appropriately, emitting an error, etc. 57 */ 58 static void ftrace__workload_exec_failed_signal(int signo __maybe_unused, 59 siginfo_t *info __maybe_unused, 60 void *ucontext __maybe_unused) 61 { 62 workload_exec_errno = info->si_value.sival_int; 63 done = true; 64 } 65 66 static bool check_ftrace_capable(void) 67 { 68 bool used_root; 69 70 if (perf_cap__capable(CAP_PERFMON, &used_root)) 71 return true; 72 73 if (!used_root && perf_cap__capable(CAP_SYS_ADMIN, &used_root)) 74 return true; 75 76 pr_err("ftrace only works for %s!\n", 77 used_root ? "root" 78 : "users with the CAP_PERFMON or CAP_SYS_ADMIN capability" 79 ); 80 return false; 81 } 82 83 static bool is_ftrace_supported(void) 84 { 85 char *file; 86 bool supported = false; 87 88 file = get_tracing_file("set_ftrace_pid"); 89 if (!file) { 90 pr_debug("cannot get tracing file set_ftrace_pid\n"); 91 return false; 92 } 93 94 if (!access(file, F_OK)) 95 supported = true; 96 97 put_tracing_file(file); 98 return supported; 99 } 100 101 static int __write_tracing_file(const char *name, const char *val, bool append) 102 { 103 char *file; 104 int fd, ret = -1; 105 ssize_t size = strlen(val); 106 int flags = O_WRONLY; 107 char errbuf[512]; 108 char *val_copy; 109 110 file = get_tracing_file(name); 111 if (!file) { 112 pr_debug("cannot get tracing file: %s\n", name); 113 return -1; 114 } 115 116 if (append) 117 flags |= O_APPEND; 118 else 119 flags |= O_TRUNC; 120 121 fd = open(file, flags); 122 if (fd < 0) { 123 pr_debug("cannot open tracing file: %s: %s\n", 124 name, str_error_r(errno, errbuf, sizeof(errbuf))); 125 goto out; 126 } 127 128 /* 129 * Copy the original value and append a '\n'. Without this, 130 * the kernel can hide possible errors. 131 */ 132 val_copy = strdup(val); 133 if (!val_copy) 134 goto out_close; 135 val_copy[size] = '\n'; 136 137 if (write(fd, val_copy, size + 1) == size + 1) 138 ret = 0; 139 else 140 pr_debug("write '%s' to tracing/%s failed: %s\n", 141 val, name, str_error_r(errno, errbuf, sizeof(errbuf))); 142 143 free(val_copy); 144 out_close: 145 close(fd); 146 out: 147 put_tracing_file(file); 148 return ret; 149 } 150 151 static int write_tracing_file(const char *name, const char *val) 152 { 153 return __write_tracing_file(name, val, false); 154 } 155 156 static int append_tracing_file(const char *name, const char *val) 157 { 158 return __write_tracing_file(name, val, true); 159 } 160 161 static int read_tracing_file_to_stdout(const char *name) 162 { 163 char buf[4096]; 164 char *file; 165 int fd; 166 int ret = -1; 167 168 file = get_tracing_file(name); 169 if (!file) { 170 pr_debug("cannot get tracing file: %s\n", name); 171 return -1; 172 } 173 174 fd = open(file, O_RDONLY); 175 if (fd < 0) { 176 pr_debug("cannot open tracing file: %s: %s\n", 177 name, str_error_r(errno, buf, sizeof(buf))); 178 goto out; 179 } 180 181 /* read contents to stdout */ 182 while (true) { 183 int n = read(fd, buf, sizeof(buf)); 184 if (n == 0) 185 break; 186 else if (n < 0) 187 goto out_close; 188 189 if (fwrite(buf, n, 1, stdout) != 1) 190 goto out_close; 191 } 192 ret = 0; 193 194 out_close: 195 close(fd); 196 out: 197 put_tracing_file(file); 198 return ret; 199 } 200 201 static int read_tracing_file_by_line(const char *name, 202 void (*cb)(char *str, void *arg), 203 void *cb_arg) 204 { 205 char *line = NULL; 206 size_t len = 0; 207 char *file; 208 FILE *fp; 209 210 file = get_tracing_file(name); 211 if (!file) { 212 pr_debug("cannot get tracing file: %s\n", name); 213 return -1; 214 } 215 216 fp = fopen(file, "r"); 217 if (fp == NULL) { 218 pr_debug("cannot open tracing file: %s\n", name); 219 put_tracing_file(file); 220 return -1; 221 } 222 223 while (getline(&line, &len, fp) != -1) { 224 cb(line, cb_arg); 225 } 226 227 if (line) 228 free(line); 229 230 fclose(fp); 231 put_tracing_file(file); 232 return 0; 233 } 234 235 static int write_tracing_file_int(const char *name, int value) 236 { 237 char buf[16]; 238 239 snprintf(buf, sizeof(buf), "%d", value); 240 if (write_tracing_file(name, buf) < 0) 241 return -1; 242 243 return 0; 244 } 245 246 static int write_tracing_option_file(const char *name, const char *val) 247 { 248 char *file; 249 int ret; 250 251 if (asprintf(&file, "options/%s", name) < 0) 252 return -1; 253 254 ret = __write_tracing_file(file, val, false); 255 free(file); 256 return ret; 257 } 258 259 static int reset_tracing_cpu(void); 260 static void reset_tracing_filters(void); 261 262 static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused) 263 { 264 write_tracing_option_file("function-fork", "0"); 265 write_tracing_option_file("func_stack_trace", "0"); 266 write_tracing_option_file("sleep-time", "1"); 267 write_tracing_option_file("funcgraph-irqs", "1"); 268 write_tracing_option_file("funcgraph-proc", "0"); 269 write_tracing_option_file("funcgraph-abstime", "0"); 270 write_tracing_option_file("funcgraph-tail", "0"); 271 write_tracing_option_file("latency-format", "0"); 272 write_tracing_option_file("irq-info", "0"); 273 } 274 275 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused) 276 { 277 if (write_tracing_file("tracing_on", "0") < 0) 278 return -1; 279 280 if (write_tracing_file("current_tracer", "nop") < 0) 281 return -1; 282 283 if (write_tracing_file("set_ftrace_pid", " ") < 0) 284 return -1; 285 286 if (reset_tracing_cpu() < 0) 287 return -1; 288 289 if (write_tracing_file("max_graph_depth", "0") < 0) 290 return -1; 291 292 if (write_tracing_file("tracing_thresh", "0") < 0) 293 return -1; 294 295 reset_tracing_filters(); 296 reset_tracing_options(ftrace); 297 return 0; 298 } 299 300 static int set_tracing_pid(struct perf_ftrace *ftrace) 301 { 302 int i; 303 char buf[16]; 304 305 if (target__has_cpu(&ftrace->target)) 306 return 0; 307 308 for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) { 309 scnprintf(buf, sizeof(buf), "%d", 310 perf_thread_map__pid(ftrace->evlist->core.threads, i)); 311 if (append_tracing_file("set_ftrace_pid", buf) < 0) 312 return -1; 313 } 314 return 0; 315 } 316 317 static int set_tracing_cpumask(struct perf_cpu_map *cpumap) 318 { 319 char *cpumask; 320 size_t mask_size; 321 int ret; 322 int last_cpu; 323 324 last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu; 325 mask_size = last_cpu / 4 + 2; /* one more byte for EOS */ 326 mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */ 327 328 cpumask = malloc(mask_size); 329 if (cpumask == NULL) { 330 pr_debug("failed to allocate cpu mask\n"); 331 return -1; 332 } 333 334 cpu_map__snprint_mask(cpumap, cpumask, mask_size); 335 336 ret = write_tracing_file("tracing_cpumask", cpumask); 337 338 free(cpumask); 339 return ret; 340 } 341 342 static int set_tracing_cpu(struct perf_ftrace *ftrace) 343 { 344 struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus; 345 346 if (!target__has_cpu(&ftrace->target)) 347 return 0; 348 349 return set_tracing_cpumask(cpumap); 350 } 351 352 static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace) 353 { 354 if (!ftrace->func_stack_trace) 355 return 0; 356 357 if (write_tracing_option_file("func_stack_trace", "1") < 0) 358 return -1; 359 360 return 0; 361 } 362 363 static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace) 364 { 365 if (!ftrace->func_irq_info) 366 return 0; 367 368 if (write_tracing_option_file("irq-info", "1") < 0) 369 return -1; 370 371 return 0; 372 } 373 374 static int reset_tracing_cpu(void) 375 { 376 struct perf_cpu_map *cpumap = perf_cpu_map__new_online_cpus(); 377 int ret; 378 379 ret = set_tracing_cpumask(cpumap); 380 perf_cpu_map__put(cpumap); 381 return ret; 382 } 383 384 static int __set_tracing_filter(const char *filter_file, struct list_head *funcs) 385 { 386 struct filter_entry *pos; 387 388 list_for_each_entry(pos, funcs, list) { 389 if (append_tracing_file(filter_file, pos->name) < 0) 390 return -1; 391 } 392 393 return 0; 394 } 395 396 static int set_tracing_filters(struct perf_ftrace *ftrace) 397 { 398 int ret; 399 400 ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters); 401 if (ret < 0) 402 return ret; 403 404 ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace); 405 if (ret < 0) 406 return ret; 407 408 ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs); 409 if (ret < 0) 410 return ret; 411 412 /* old kernels do not have this filter */ 413 __set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs); 414 415 return ret; 416 } 417 418 static void reset_tracing_filters(void) 419 { 420 write_tracing_file("set_ftrace_filter", " "); 421 write_tracing_file("set_ftrace_notrace", " "); 422 write_tracing_file("set_graph_function", " "); 423 write_tracing_file("set_graph_notrace", " "); 424 } 425 426 static int set_tracing_depth(struct perf_ftrace *ftrace) 427 { 428 if (ftrace->graph_depth == 0) 429 return 0; 430 431 if (ftrace->graph_depth < 0) { 432 pr_err("invalid graph depth: %d\n", ftrace->graph_depth); 433 return -1; 434 } 435 436 if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0) 437 return -1; 438 439 return 0; 440 } 441 442 static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace) 443 { 444 int ret; 445 446 if (ftrace->percpu_buffer_size == 0) 447 return 0; 448 449 ret = write_tracing_file_int("buffer_size_kb", 450 ftrace->percpu_buffer_size / 1024); 451 if (ret < 0) 452 return ret; 453 454 return 0; 455 } 456 457 static int set_tracing_trace_inherit(struct perf_ftrace *ftrace) 458 { 459 if (!ftrace->inherit) 460 return 0; 461 462 if (write_tracing_option_file("function-fork", "1") < 0) 463 return -1; 464 465 return 0; 466 } 467 468 static int set_tracing_sleep_time(struct perf_ftrace *ftrace) 469 { 470 if (!ftrace->graph_nosleep_time) 471 return 0; 472 473 if (write_tracing_option_file("sleep-time", "0") < 0) 474 return -1; 475 476 return 0; 477 } 478 479 static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace) 480 { 481 if (!ftrace->graph_noirqs) 482 return 0; 483 484 if (write_tracing_option_file("funcgraph-irqs", "0") < 0) 485 return -1; 486 487 return 0; 488 } 489 490 static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace) 491 { 492 if (!ftrace->graph_verbose) 493 return 0; 494 495 if (write_tracing_option_file("funcgraph-proc", "1") < 0) 496 return -1; 497 498 if (write_tracing_option_file("funcgraph-abstime", "1") < 0) 499 return -1; 500 501 if (write_tracing_option_file("latency-format", "1") < 0) 502 return -1; 503 504 return 0; 505 } 506 507 static int set_tracing_funcgraph_tail(struct perf_ftrace *ftrace) 508 { 509 if (!ftrace->graph_tail) 510 return 0; 511 512 if (write_tracing_option_file("funcgraph-tail", "1") < 0) 513 return -1; 514 515 return 0; 516 } 517 518 static int set_tracing_thresh(struct perf_ftrace *ftrace) 519 { 520 int ret; 521 522 if (ftrace->graph_thresh == 0) 523 return 0; 524 525 ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh); 526 if (ret < 0) 527 return ret; 528 529 return 0; 530 } 531 532 static int set_tracing_options(struct perf_ftrace *ftrace) 533 { 534 if (set_tracing_pid(ftrace) < 0) { 535 pr_err("failed to set ftrace pid\n"); 536 return -1; 537 } 538 539 if (set_tracing_cpu(ftrace) < 0) { 540 pr_err("failed to set tracing cpumask\n"); 541 return -1; 542 } 543 544 if (set_tracing_func_stack_trace(ftrace) < 0) { 545 pr_err("failed to set tracing option func_stack_trace\n"); 546 return -1; 547 } 548 549 if (set_tracing_func_irqinfo(ftrace) < 0) { 550 pr_err("failed to set tracing option irq-info\n"); 551 return -1; 552 } 553 554 if (set_tracing_filters(ftrace) < 0) { 555 pr_err("failed to set tracing filters\n"); 556 return -1; 557 } 558 559 if (set_tracing_depth(ftrace) < 0) { 560 pr_err("failed to set graph depth\n"); 561 return -1; 562 } 563 564 if (set_tracing_percpu_buffer_size(ftrace) < 0) { 565 pr_err("failed to set tracing per-cpu buffer size\n"); 566 return -1; 567 } 568 569 if (set_tracing_trace_inherit(ftrace) < 0) { 570 pr_err("failed to set tracing option function-fork\n"); 571 return -1; 572 } 573 574 if (set_tracing_sleep_time(ftrace) < 0) { 575 pr_err("failed to set tracing option sleep-time\n"); 576 return -1; 577 } 578 579 if (set_tracing_funcgraph_irqs(ftrace) < 0) { 580 pr_err("failed to set tracing option funcgraph-irqs\n"); 581 return -1; 582 } 583 584 if (set_tracing_funcgraph_verbose(ftrace) < 0) { 585 pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n"); 586 return -1; 587 } 588 589 if (set_tracing_thresh(ftrace) < 0) { 590 pr_err("failed to set tracing thresh\n"); 591 return -1; 592 } 593 594 if (set_tracing_funcgraph_tail(ftrace) < 0) { 595 pr_err("failed to set tracing option funcgraph-tail\n"); 596 return -1; 597 } 598 599 return 0; 600 } 601 602 static void select_tracer(struct perf_ftrace *ftrace) 603 { 604 bool graph = !list_empty(&ftrace->graph_funcs) || 605 !list_empty(&ftrace->nograph_funcs); 606 bool func = !list_empty(&ftrace->filters) || 607 !list_empty(&ftrace->notrace); 608 609 /* The function_graph has priority over function tracer. */ 610 if (graph) 611 ftrace->tracer = "function_graph"; 612 else if (func) 613 ftrace->tracer = "function"; 614 /* Otherwise, the default tracer is used. */ 615 616 pr_debug("%s tracer is used\n", ftrace->tracer); 617 } 618 619 static int __cmd_ftrace(struct perf_ftrace *ftrace) 620 { 621 char *trace_file; 622 int trace_fd; 623 char buf[4096]; 624 struct pollfd pollfd = { 625 .events = POLLIN, 626 }; 627 628 select_tracer(ftrace); 629 630 if (reset_tracing_files(ftrace) < 0) { 631 pr_err("failed to reset ftrace\n"); 632 goto out; 633 } 634 635 /* reset ftrace buffer */ 636 if (write_tracing_file("trace", "0") < 0) 637 goto out; 638 639 if (set_tracing_options(ftrace) < 0) 640 goto out_reset; 641 642 if (write_tracing_file("current_tracer", ftrace->tracer) < 0) { 643 pr_err("failed to set current_tracer to %s\n", ftrace->tracer); 644 goto out_reset; 645 } 646 647 setup_pager(); 648 649 trace_file = get_tracing_file("trace_pipe"); 650 if (!trace_file) { 651 pr_err("failed to open trace_pipe\n"); 652 goto out_reset; 653 } 654 655 trace_fd = open(trace_file, O_RDONLY); 656 657 put_tracing_file(trace_file); 658 659 if (trace_fd < 0) { 660 pr_err("failed to open trace_pipe\n"); 661 goto out_reset; 662 } 663 664 fcntl(trace_fd, F_SETFL, O_NONBLOCK); 665 pollfd.fd = trace_fd; 666 667 /* display column headers */ 668 read_tracing_file_to_stdout("trace"); 669 670 if (!ftrace->target.initial_delay) { 671 if (write_tracing_file("tracing_on", "1") < 0) { 672 pr_err("can't enable tracing\n"); 673 goto out_close_fd; 674 } 675 } 676 677 evlist__start_workload(ftrace->evlist); 678 679 if (ftrace->target.initial_delay > 0) { 680 usleep(ftrace->target.initial_delay * 1000); 681 if (write_tracing_file("tracing_on", "1") < 0) { 682 pr_err("can't enable tracing\n"); 683 goto out_close_fd; 684 } 685 } 686 687 while (!done) { 688 if (poll(&pollfd, 1, -1) < 0) 689 break; 690 691 if (pollfd.revents & POLLIN) { 692 int n = read(trace_fd, buf, sizeof(buf)); 693 if (n < 0) 694 break; 695 if (fwrite(buf, n, 1, stdout) != 1) 696 break; 697 /* flush output since stdout is in full buffering mode due to pager */ 698 fflush(stdout); 699 } 700 } 701 702 write_tracing_file("tracing_on", "0"); 703 704 if (workload_exec_errno) { 705 const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); 706 /* flush stdout first so below error msg appears at the end. */ 707 fflush(stdout); 708 pr_err("workload failed: %s\n", emsg); 709 goto out_close_fd; 710 } 711 712 /* read remaining buffer contents */ 713 while (true) { 714 int n = read(trace_fd, buf, sizeof(buf)); 715 if (n <= 0) 716 break; 717 if (fwrite(buf, n, 1, stdout) != 1) 718 break; 719 } 720 721 out_close_fd: 722 close(trace_fd); 723 out_reset: 724 reset_tracing_files(ftrace); 725 out: 726 return (done && !workload_exec_errno) ? 0 : -1; 727 } 728 729 static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf, 730 bool use_nsec) 731 { 732 char *p, *q; 733 char *unit; 734 double num; 735 int i; 736 737 /* ensure NUL termination */ 738 buf[len] = '\0'; 739 740 /* handle data line by line */ 741 for (p = buf; (q = strchr(p, '\n')) != NULL; p = q + 1) { 742 *q = '\0'; 743 /* move it to the line buffer */ 744 strcat(linebuf, p); 745 746 /* 747 * parse trace output to get function duration like in 748 * 749 * # tracer: function_graph 750 * # 751 * # CPU DURATION FUNCTION CALLS 752 * # | | | | | | | 753 * 1) + 10.291 us | do_filp_open(); 754 * 1) 4.889 us | do_filp_open(); 755 * 1) 6.086 us | do_filp_open(); 756 * 757 */ 758 if (linebuf[0] == '#') 759 goto next; 760 761 /* ignore CPU */ 762 p = strchr(linebuf, ')'); 763 if (p == NULL) 764 p = linebuf; 765 766 while (*p && !isdigit(*p) && (*p != '|')) 767 p++; 768 769 /* no duration */ 770 if (*p == '\0' || *p == '|') 771 goto next; 772 773 num = strtod(p, &unit); 774 if (!unit || strncmp(unit, " us", 3)) 775 goto next; 776 777 if (use_nsec) 778 num *= 1000; 779 780 i = log2(num); 781 if (i < 0) 782 i = 0; 783 if (i >= NUM_BUCKET) 784 i = NUM_BUCKET - 1; 785 786 buckets[i]++; 787 788 next: 789 /* empty the line buffer for the next output */ 790 linebuf[0] = '\0'; 791 } 792 793 /* preserve any remaining output (before newline) */ 794 strcat(linebuf, p); 795 } 796 797 static void display_histogram(int buckets[], bool use_nsec) 798 { 799 int i; 800 int total = 0; 801 int bar_total = 46; /* to fit in 80 column */ 802 char bar[] = "###############################################"; 803 int bar_len; 804 805 for (i = 0; i < NUM_BUCKET; i++) 806 total += buckets[i]; 807 808 if (total == 0) { 809 printf("No data found\n"); 810 return; 811 } 812 813 printf("# %14s | %10s | %-*s |\n", 814 " DURATION ", "COUNT", bar_total, "GRAPH"); 815 816 bar_len = buckets[0] * bar_total / total; 817 printf(" %4d - %-4d %s | %10d | %.*s%*s |\n", 818 0, 1, "us", buckets[0], bar_len, bar, bar_total - bar_len, ""); 819 820 for (i = 1; i < NUM_BUCKET - 1; i++) { 821 int start = (1 << (i - 1)); 822 int stop = 1 << i; 823 const char *unit = use_nsec ? "ns" : "us"; 824 825 if (start >= 1024) { 826 start >>= 10; 827 stop >>= 10; 828 unit = use_nsec ? "us" : "ms"; 829 } 830 bar_len = buckets[i] * bar_total / total; 831 printf(" %4d - %-4d %s | %10d | %.*s%*s |\n", 832 start, stop, unit, buckets[i], bar_len, bar, 833 bar_total - bar_len, ""); 834 } 835 836 bar_len = buckets[NUM_BUCKET - 1] * bar_total / total; 837 printf(" %4d - %-4s %s | %10d | %.*s%*s |\n", 838 1, "...", use_nsec ? "ms" : " s", buckets[NUM_BUCKET - 1], 839 bar_len, bar, bar_total - bar_len, ""); 840 841 } 842 843 static int prepare_func_latency(struct perf_ftrace *ftrace) 844 { 845 char *trace_file; 846 int fd; 847 848 if (ftrace->target.use_bpf) 849 return perf_ftrace__latency_prepare_bpf(ftrace); 850 851 if (reset_tracing_files(ftrace) < 0) { 852 pr_err("failed to reset ftrace\n"); 853 return -1; 854 } 855 856 /* reset ftrace buffer */ 857 if (write_tracing_file("trace", "0") < 0) 858 return -1; 859 860 if (set_tracing_options(ftrace) < 0) 861 return -1; 862 863 /* force to use the function_graph tracer to track duration */ 864 if (write_tracing_file("current_tracer", "function_graph") < 0) { 865 pr_err("failed to set current_tracer to function_graph\n"); 866 return -1; 867 } 868 869 trace_file = get_tracing_file("trace_pipe"); 870 if (!trace_file) { 871 pr_err("failed to open trace_pipe\n"); 872 return -1; 873 } 874 875 fd = open(trace_file, O_RDONLY); 876 if (fd < 0) 877 pr_err("failed to open trace_pipe\n"); 878 879 put_tracing_file(trace_file); 880 return fd; 881 } 882 883 static int start_func_latency(struct perf_ftrace *ftrace) 884 { 885 if (ftrace->target.use_bpf) 886 return perf_ftrace__latency_start_bpf(ftrace); 887 888 if (write_tracing_file("tracing_on", "1") < 0) { 889 pr_err("can't enable tracing\n"); 890 return -1; 891 } 892 893 return 0; 894 } 895 896 static int stop_func_latency(struct perf_ftrace *ftrace) 897 { 898 if (ftrace->target.use_bpf) 899 return perf_ftrace__latency_stop_bpf(ftrace); 900 901 write_tracing_file("tracing_on", "0"); 902 return 0; 903 } 904 905 static int read_func_latency(struct perf_ftrace *ftrace, int buckets[]) 906 { 907 if (ftrace->target.use_bpf) 908 return perf_ftrace__latency_read_bpf(ftrace, buckets); 909 910 return 0; 911 } 912 913 static int cleanup_func_latency(struct perf_ftrace *ftrace) 914 { 915 if (ftrace->target.use_bpf) 916 return perf_ftrace__latency_cleanup_bpf(ftrace); 917 918 reset_tracing_files(ftrace); 919 return 0; 920 } 921 922 static int __cmd_latency(struct perf_ftrace *ftrace) 923 { 924 int trace_fd; 925 char buf[4096]; 926 char line[256]; 927 struct pollfd pollfd = { 928 .events = POLLIN, 929 }; 930 int buckets[NUM_BUCKET] = { }; 931 932 trace_fd = prepare_func_latency(ftrace); 933 if (trace_fd < 0) 934 goto out; 935 936 fcntl(trace_fd, F_SETFL, O_NONBLOCK); 937 pollfd.fd = trace_fd; 938 939 if (start_func_latency(ftrace) < 0) 940 goto out; 941 942 evlist__start_workload(ftrace->evlist); 943 944 line[0] = '\0'; 945 while (!done) { 946 if (poll(&pollfd, 1, -1) < 0) 947 break; 948 949 if (pollfd.revents & POLLIN) { 950 int n = read(trace_fd, buf, sizeof(buf) - 1); 951 if (n < 0) 952 break; 953 954 make_histogram(buckets, buf, n, line, ftrace->use_nsec); 955 } 956 } 957 958 stop_func_latency(ftrace); 959 960 if (workload_exec_errno) { 961 const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); 962 pr_err("workload failed: %s\n", emsg); 963 goto out; 964 } 965 966 /* read remaining buffer contents */ 967 while (!ftrace->target.use_bpf) { 968 int n = read(trace_fd, buf, sizeof(buf) - 1); 969 if (n <= 0) 970 break; 971 make_histogram(buckets, buf, n, line, ftrace->use_nsec); 972 } 973 974 read_func_latency(ftrace, buckets); 975 976 display_histogram(buckets, ftrace->use_nsec); 977 978 out: 979 close(trace_fd); 980 cleanup_func_latency(ftrace); 981 982 return (done && !workload_exec_errno) ? 0 : -1; 983 } 984 985 static size_t profile_hash(long func, void *ctx __maybe_unused) 986 { 987 return str_hash((char *)func); 988 } 989 990 static bool profile_equal(long func1, long func2, void *ctx __maybe_unused) 991 { 992 return !strcmp((char *)func1, (char *)func2); 993 } 994 995 static int prepare_func_profile(struct perf_ftrace *ftrace) 996 { 997 ftrace->tracer = "function_graph"; 998 ftrace->graph_tail = 1; 999 1000 ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL); 1001 if (ftrace->profile_hash == NULL) 1002 return -ENOMEM; 1003 1004 return 0; 1005 } 1006 1007 /* This is saved in a hashmap keyed by the function name */ 1008 struct ftrace_profile_data { 1009 struct stats st; 1010 }; 1011 1012 static int add_func_duration(struct perf_ftrace *ftrace, char *func, double time_ns) 1013 { 1014 struct ftrace_profile_data *prof = NULL; 1015 1016 if (!hashmap__find(ftrace->profile_hash, func, &prof)) { 1017 char *key = strdup(func); 1018 1019 if (key == NULL) 1020 return -ENOMEM; 1021 1022 prof = zalloc(sizeof(*prof)); 1023 if (prof == NULL) { 1024 free(key); 1025 return -ENOMEM; 1026 } 1027 1028 init_stats(&prof->st); 1029 hashmap__add(ftrace->profile_hash, key, prof); 1030 } 1031 1032 update_stats(&prof->st, time_ns); 1033 return 0; 1034 } 1035 1036 /* 1037 * The ftrace function_graph text output normally looks like below: 1038 * 1039 * CPU DURATION FUNCTION 1040 * 1041 * 0) | syscall_trace_enter.isra.0() { 1042 * 0) | __audit_syscall_entry() { 1043 * 0) | auditd_test_task() { 1044 * 0) 0.271 us | __rcu_read_lock(); 1045 * 0) 0.275 us | __rcu_read_unlock(); 1046 * 0) 1.254 us | } /\* auditd_test_task *\/ 1047 * 0) 0.279 us | ktime_get_coarse_real_ts64(); 1048 * 0) 2.227 us | } /\* __audit_syscall_entry *\/ 1049 * 0) 2.713 us | } /\* syscall_trace_enter.isra.0 *\/ 1050 * 1051 * Parse the line and get the duration and function name. 1052 */ 1053 static int parse_func_duration(struct perf_ftrace *ftrace, char *line, size_t len) 1054 { 1055 char *p; 1056 char *func; 1057 double duration; 1058 1059 /* skip CPU */ 1060 p = strchr(line, ')'); 1061 if (p == NULL) 1062 return 0; 1063 1064 /* get duration */ 1065 p = skip_spaces(p + 1); 1066 1067 /* no duration? */ 1068 if (p == NULL || *p == '|') 1069 return 0; 1070 1071 /* skip markers like '*' or '!' for longer than ms */ 1072 if (!isdigit(*p)) 1073 p++; 1074 1075 duration = strtod(p, &p); 1076 1077 if (strncmp(p, " us", 3)) { 1078 pr_debug("non-usec time found.. ignoring\n"); 1079 return 0; 1080 } 1081 1082 /* 1083 * profile stat keeps the max and min values as integer, 1084 * convert to nsec time so that we can have accurate max. 1085 */ 1086 duration *= 1000; 1087 1088 /* skip to the pipe */ 1089 while (p < line + len && *p != '|') 1090 p++; 1091 1092 if (*p++ != '|') 1093 return -EINVAL; 1094 1095 /* get function name */ 1096 func = skip_spaces(p); 1097 1098 /* skip the closing bracket and the start of comment */ 1099 if (*func == '}') 1100 func += 5; 1101 1102 /* remove semi-colon or end of comment at the end */ 1103 p = line + len - 1; 1104 while (!isalnum(*p) && *p != ']') { 1105 *p = '\0'; 1106 --p; 1107 } 1108 1109 return add_func_duration(ftrace, func, duration); 1110 } 1111 1112 enum perf_ftrace_profile_sort_key { 1113 PFP_SORT_TOTAL = 0, 1114 PFP_SORT_AVG, 1115 PFP_SORT_MAX, 1116 PFP_SORT_COUNT, 1117 PFP_SORT_NAME, 1118 }; 1119 1120 static enum perf_ftrace_profile_sort_key profile_sort = PFP_SORT_TOTAL; 1121 1122 static int cmp_profile_data(const void *a, const void *b) 1123 { 1124 const struct hashmap_entry *e1 = *(const struct hashmap_entry **)a; 1125 const struct hashmap_entry *e2 = *(const struct hashmap_entry **)b; 1126 struct ftrace_profile_data *p1 = e1->pvalue; 1127 struct ftrace_profile_data *p2 = e2->pvalue; 1128 double v1, v2; 1129 1130 switch (profile_sort) { 1131 case PFP_SORT_NAME: 1132 return strcmp(e1->pkey, e2->pkey); 1133 case PFP_SORT_AVG: 1134 v1 = p1->st.mean; 1135 v2 = p2->st.mean; 1136 break; 1137 case PFP_SORT_MAX: 1138 v1 = p1->st.max; 1139 v2 = p2->st.max; 1140 break; 1141 case PFP_SORT_COUNT: 1142 v1 = p1->st.n; 1143 v2 = p2->st.n; 1144 break; 1145 case PFP_SORT_TOTAL: 1146 default: 1147 v1 = p1->st.n * p1->st.mean; 1148 v2 = p2->st.n * p2->st.mean; 1149 break; 1150 } 1151 1152 if (v1 > v2) 1153 return -1; 1154 else 1155 return 1; 1156 } 1157 1158 static void print_profile_result(struct perf_ftrace *ftrace) 1159 { 1160 struct hashmap_entry *entry, **profile; 1161 size_t i, nr, bkt; 1162 1163 nr = hashmap__size(ftrace->profile_hash); 1164 if (nr == 0) 1165 return; 1166 1167 profile = calloc(nr, sizeof(*profile)); 1168 if (profile == NULL) { 1169 pr_err("failed to allocate memory for the result\n"); 1170 return; 1171 } 1172 1173 i = 0; 1174 hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) 1175 profile[i++] = entry; 1176 1177 assert(i == nr); 1178 1179 //cmp_profile_data(profile[0], profile[1]); 1180 qsort(profile, nr, sizeof(*profile), cmp_profile_data); 1181 1182 printf("# %10s %10s %10s %10s %s\n", 1183 "Total (us)", "Avg (us)", "Max (us)", "Count", "Function"); 1184 1185 for (i = 0; i < nr; i++) { 1186 const char *name = profile[i]->pkey; 1187 struct ftrace_profile_data *p = profile[i]->pvalue; 1188 1189 printf("%12.3f %10.3f %6"PRIu64".%03"PRIu64" %10.0f %s\n", 1190 p->st.n * p->st.mean / 1000, p->st.mean / 1000, 1191 p->st.max / 1000, p->st.max % 1000, p->st.n, name); 1192 } 1193 1194 free(profile); 1195 1196 hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) { 1197 free((char *)entry->pkey); 1198 free(entry->pvalue); 1199 } 1200 1201 hashmap__free(ftrace->profile_hash); 1202 ftrace->profile_hash = NULL; 1203 } 1204 1205 static int __cmd_profile(struct perf_ftrace *ftrace) 1206 { 1207 char *trace_file; 1208 int trace_fd; 1209 char buf[4096]; 1210 struct io io; 1211 char *line = NULL; 1212 size_t line_len = 0; 1213 1214 if (prepare_func_profile(ftrace) < 0) { 1215 pr_err("failed to prepare func profiler\n"); 1216 goto out; 1217 } 1218 1219 if (reset_tracing_files(ftrace) < 0) { 1220 pr_err("failed to reset ftrace\n"); 1221 goto out; 1222 } 1223 1224 /* reset ftrace buffer */ 1225 if (write_tracing_file("trace", "0") < 0) 1226 goto out; 1227 1228 if (set_tracing_options(ftrace) < 0) 1229 return -1; 1230 1231 if (write_tracing_file("current_tracer", ftrace->tracer) < 0) { 1232 pr_err("failed to set current_tracer to %s\n", ftrace->tracer); 1233 goto out_reset; 1234 } 1235 1236 setup_pager(); 1237 1238 trace_file = get_tracing_file("trace_pipe"); 1239 if (!trace_file) { 1240 pr_err("failed to open trace_pipe\n"); 1241 goto out_reset; 1242 } 1243 1244 trace_fd = open(trace_file, O_RDONLY); 1245 1246 put_tracing_file(trace_file); 1247 1248 if (trace_fd < 0) { 1249 pr_err("failed to open trace_pipe\n"); 1250 goto out_reset; 1251 } 1252 1253 fcntl(trace_fd, F_SETFL, O_NONBLOCK); 1254 1255 if (write_tracing_file("tracing_on", "1") < 0) { 1256 pr_err("can't enable tracing\n"); 1257 goto out_close_fd; 1258 } 1259 1260 evlist__start_workload(ftrace->evlist); 1261 1262 io__init(&io, trace_fd, buf, sizeof(buf)); 1263 io.timeout_ms = -1; 1264 1265 while (!done && !io.eof) { 1266 if (io__getline(&io, &line, &line_len) < 0) 1267 break; 1268 1269 if (parse_func_duration(ftrace, line, line_len) < 0) 1270 break; 1271 } 1272 1273 write_tracing_file("tracing_on", "0"); 1274 1275 if (workload_exec_errno) { 1276 const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); 1277 /* flush stdout first so below error msg appears at the end. */ 1278 fflush(stdout); 1279 pr_err("workload failed: %s\n", emsg); 1280 goto out_free_line; 1281 } 1282 1283 /* read remaining buffer contents */ 1284 io.timeout_ms = 0; 1285 while (!io.eof) { 1286 if (io__getline(&io, &line, &line_len) < 0) 1287 break; 1288 1289 if (parse_func_duration(ftrace, line, line_len) < 0) 1290 break; 1291 } 1292 1293 print_profile_result(ftrace); 1294 1295 out_free_line: 1296 free(line); 1297 out_close_fd: 1298 close(trace_fd); 1299 out_reset: 1300 reset_tracing_files(ftrace); 1301 out: 1302 return (done && !workload_exec_errno) ? 0 : -1; 1303 } 1304 1305 static int perf_ftrace_config(const char *var, const char *value, void *cb) 1306 { 1307 struct perf_ftrace *ftrace = cb; 1308 1309 if (!strstarts(var, "ftrace.")) 1310 return 0; 1311 1312 if (strcmp(var, "ftrace.tracer")) 1313 return -1; 1314 1315 if (!strcmp(value, "function_graph") || 1316 !strcmp(value, "function")) { 1317 ftrace->tracer = value; 1318 return 0; 1319 } 1320 1321 pr_err("Please select \"function_graph\" (default) or \"function\"\n"); 1322 return -1; 1323 } 1324 1325 static void list_function_cb(char *str, void *arg) 1326 { 1327 struct strfilter *filter = (struct strfilter *)arg; 1328 1329 if (strfilter__compare(filter, str)) 1330 printf("%s", str); 1331 } 1332 1333 static int opt_list_avail_functions(const struct option *opt __maybe_unused, 1334 const char *str, int unset) 1335 { 1336 struct strfilter *filter; 1337 const char *err = NULL; 1338 int ret; 1339 1340 if (unset || !str) 1341 return -1; 1342 1343 filter = strfilter__new(str, &err); 1344 if (!filter) 1345 return err ? -EINVAL : -ENOMEM; 1346 1347 ret = strfilter__or(filter, str, &err); 1348 if (ret == -EINVAL) { 1349 pr_err("Filter parse error at %td.\n", err - str + 1); 1350 pr_err("Source: \"%s\"\n", str); 1351 pr_err(" %*c\n", (int)(err - str + 1), '^'); 1352 strfilter__delete(filter); 1353 return ret; 1354 } 1355 1356 ret = read_tracing_file_by_line("available_filter_functions", 1357 list_function_cb, filter); 1358 strfilter__delete(filter); 1359 if (ret < 0) 1360 return ret; 1361 1362 exit(0); 1363 } 1364 1365 static int parse_filter_func(const struct option *opt, const char *str, 1366 int unset __maybe_unused) 1367 { 1368 struct list_head *head = opt->value; 1369 struct filter_entry *entry; 1370 1371 entry = malloc(sizeof(*entry) + strlen(str) + 1); 1372 if (entry == NULL) 1373 return -ENOMEM; 1374 1375 strcpy(entry->name, str); 1376 list_add_tail(&entry->list, head); 1377 1378 return 0; 1379 } 1380 1381 static void delete_filter_func(struct list_head *head) 1382 { 1383 struct filter_entry *pos, *tmp; 1384 1385 list_for_each_entry_safe(pos, tmp, head, list) { 1386 list_del_init(&pos->list); 1387 free(pos); 1388 } 1389 } 1390 1391 static int parse_buffer_size(const struct option *opt, 1392 const char *str, int unset) 1393 { 1394 unsigned long *s = (unsigned long *)opt->value; 1395 static struct parse_tag tags_size[] = { 1396 { .tag = 'B', .mult = 1 }, 1397 { .tag = 'K', .mult = 1 << 10 }, 1398 { .tag = 'M', .mult = 1 << 20 }, 1399 { .tag = 'G', .mult = 1 << 30 }, 1400 { .tag = 0 }, 1401 }; 1402 unsigned long val; 1403 1404 if (unset) { 1405 *s = 0; 1406 return 0; 1407 } 1408 1409 val = parse_tag_value(str, tags_size); 1410 if (val != (unsigned long) -1) { 1411 if (val < 1024) { 1412 pr_err("buffer size too small, must larger than 1KB."); 1413 return -1; 1414 } 1415 *s = val; 1416 return 0; 1417 } 1418 1419 return -1; 1420 } 1421 1422 static int parse_func_tracer_opts(const struct option *opt, 1423 const char *str, int unset) 1424 { 1425 int ret; 1426 struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value; 1427 struct sublevel_option func_tracer_opts[] = { 1428 { .name = "call-graph", .value_ptr = &ftrace->func_stack_trace }, 1429 { .name = "irq-info", .value_ptr = &ftrace->func_irq_info }, 1430 { .name = NULL, } 1431 }; 1432 1433 if (unset) 1434 return 0; 1435 1436 ret = perf_parse_sublevel_options(str, func_tracer_opts); 1437 if (ret) 1438 return ret; 1439 1440 return 0; 1441 } 1442 1443 static int parse_graph_tracer_opts(const struct option *opt, 1444 const char *str, int unset) 1445 { 1446 int ret; 1447 struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value; 1448 struct sublevel_option graph_tracer_opts[] = { 1449 { .name = "nosleep-time", .value_ptr = &ftrace->graph_nosleep_time }, 1450 { .name = "noirqs", .value_ptr = &ftrace->graph_noirqs }, 1451 { .name = "verbose", .value_ptr = &ftrace->graph_verbose }, 1452 { .name = "thresh", .value_ptr = &ftrace->graph_thresh }, 1453 { .name = "depth", .value_ptr = &ftrace->graph_depth }, 1454 { .name = "tail", .value_ptr = &ftrace->graph_tail }, 1455 { .name = NULL, } 1456 }; 1457 1458 if (unset) 1459 return 0; 1460 1461 ret = perf_parse_sublevel_options(str, graph_tracer_opts); 1462 if (ret) 1463 return ret; 1464 1465 return 0; 1466 } 1467 1468 static int parse_sort_key(const struct option *opt, const char *str, int unset) 1469 { 1470 enum perf_ftrace_profile_sort_key *key = (void *)opt->value; 1471 1472 if (unset) 1473 return 0; 1474 1475 if (!strcmp(str, "total")) 1476 *key = PFP_SORT_TOTAL; 1477 else if (!strcmp(str, "avg")) 1478 *key = PFP_SORT_AVG; 1479 else if (!strcmp(str, "max")) 1480 *key = PFP_SORT_MAX; 1481 else if (!strcmp(str, "count")) 1482 *key = PFP_SORT_COUNT; 1483 else if (!strcmp(str, "name")) 1484 *key = PFP_SORT_NAME; 1485 else { 1486 pr_err("Unknown sort key: %s\n", str); 1487 return -1; 1488 } 1489 return 0; 1490 } 1491 1492 enum perf_ftrace_subcommand { 1493 PERF_FTRACE_NONE, 1494 PERF_FTRACE_TRACE, 1495 PERF_FTRACE_LATENCY, 1496 PERF_FTRACE_PROFILE, 1497 }; 1498 1499 int cmd_ftrace(int argc, const char **argv) 1500 { 1501 int ret; 1502 int (*cmd_func)(struct perf_ftrace *) = NULL; 1503 struct perf_ftrace ftrace = { 1504 .tracer = DEFAULT_TRACER, 1505 .target = { .uid = UINT_MAX, }, 1506 }; 1507 const struct option common_options[] = { 1508 OPT_STRING('p', "pid", &ftrace.target.pid, "pid", 1509 "Trace on existing process id"), 1510 /* TODO: Add short option -t after -t/--tracer can be removed. */ 1511 OPT_STRING(0, "tid", &ftrace.target.tid, "tid", 1512 "Trace on existing thread id (exclusive to --pid)"), 1513 OPT_INCR('v', "verbose", &verbose, 1514 "Be more verbose"), 1515 OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide, 1516 "System-wide collection from all CPUs"), 1517 OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu", 1518 "List of cpus to monitor"), 1519 OPT_END() 1520 }; 1521 const struct option ftrace_options[] = { 1522 OPT_STRING('t', "tracer", &ftrace.tracer, "tracer", 1523 "Tracer to use: function_graph(default) or function"), 1524 OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]", 1525 "Show available functions to filter", 1526 opt_list_avail_functions, "*"), 1527 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", 1528 "Trace given functions using function tracer", 1529 parse_filter_func), 1530 OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func", 1531 "Do not trace given functions", parse_filter_func), 1532 OPT_CALLBACK(0, "func-opts", &ftrace, "options", 1533 "Function tracer options, available options: call-graph,irq-info", 1534 parse_func_tracer_opts), 1535 OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func", 1536 "Trace given functions using function_graph tracer", 1537 parse_filter_func), 1538 OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func", 1539 "Set nograph filter on given functions", parse_filter_func), 1540 OPT_CALLBACK(0, "graph-opts", &ftrace, "options", 1541 "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>", 1542 parse_graph_tracer_opts), 1543 OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size", 1544 "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size), 1545 OPT_BOOLEAN(0, "inherit", &ftrace.inherit, 1546 "Trace children processes"), 1547 OPT_INTEGER('D', "delay", &ftrace.target.initial_delay, 1548 "Number of milliseconds to wait before starting tracing after program start"), 1549 OPT_PARENT(common_options), 1550 }; 1551 const struct option latency_options[] = { 1552 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", 1553 "Show latency of given function", parse_filter_func), 1554 #ifdef HAVE_BPF_SKEL 1555 OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf, 1556 "Use BPF to measure function latency"), 1557 #endif 1558 OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec, 1559 "Use nano-second histogram"), 1560 OPT_PARENT(common_options), 1561 }; 1562 const struct option profile_options[] = { 1563 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", 1564 "Trace given functions using function tracer", 1565 parse_filter_func), 1566 OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func", 1567 "Do not trace given functions", parse_filter_func), 1568 OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func", 1569 "Trace given functions using function_graph tracer", 1570 parse_filter_func), 1571 OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func", 1572 "Set nograph filter on given functions", parse_filter_func), 1573 OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size", 1574 "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size), 1575 OPT_CALLBACK('s', "sort", &profile_sort, "key", 1576 "Sort result by key: total (default), avg, max, count, name.", 1577 parse_sort_key), 1578 OPT_PARENT(common_options), 1579 }; 1580 const struct option *options = ftrace_options; 1581 1582 const char * const ftrace_usage[] = { 1583 "perf ftrace [<options>] [<command>]", 1584 "perf ftrace [<options>] -- [<command>] [<options>]", 1585 "perf ftrace {trace|latency|profile} [<options>] [<command>]", 1586 "perf ftrace {trace|latency|profile} [<options>] -- [<command>] [<options>]", 1587 NULL 1588 }; 1589 enum perf_ftrace_subcommand subcmd = PERF_FTRACE_NONE; 1590 1591 INIT_LIST_HEAD(&ftrace.filters); 1592 INIT_LIST_HEAD(&ftrace.notrace); 1593 INIT_LIST_HEAD(&ftrace.graph_funcs); 1594 INIT_LIST_HEAD(&ftrace.nograph_funcs); 1595 1596 signal(SIGINT, sig_handler); 1597 signal(SIGUSR1, sig_handler); 1598 signal(SIGCHLD, sig_handler); 1599 signal(SIGPIPE, sig_handler); 1600 1601 if (!check_ftrace_capable()) 1602 return -1; 1603 1604 if (!is_ftrace_supported()) { 1605 pr_err("ftrace is not supported on this system\n"); 1606 return -ENOTSUP; 1607 } 1608 1609 ret = perf_config(perf_ftrace_config, &ftrace); 1610 if (ret < 0) 1611 return -1; 1612 1613 if (argc > 1) { 1614 if (!strcmp(argv[1], "trace")) { 1615 subcmd = PERF_FTRACE_TRACE; 1616 } else if (!strcmp(argv[1], "latency")) { 1617 subcmd = PERF_FTRACE_LATENCY; 1618 options = latency_options; 1619 } else if (!strcmp(argv[1], "profile")) { 1620 subcmd = PERF_FTRACE_PROFILE; 1621 options = profile_options; 1622 } 1623 1624 if (subcmd != PERF_FTRACE_NONE) { 1625 argc--; 1626 argv++; 1627 } 1628 } 1629 /* for backward compatibility */ 1630 if (subcmd == PERF_FTRACE_NONE) 1631 subcmd = PERF_FTRACE_TRACE; 1632 1633 argc = parse_options(argc, argv, options, ftrace_usage, 1634 PARSE_OPT_STOP_AT_NON_OPTION); 1635 if (argc < 0) { 1636 ret = -EINVAL; 1637 goto out_delete_filters; 1638 } 1639 1640 /* Make system wide (-a) the default target. */ 1641 if (!argc && target__none(&ftrace.target)) 1642 ftrace.target.system_wide = true; 1643 1644 switch (subcmd) { 1645 case PERF_FTRACE_TRACE: 1646 cmd_func = __cmd_ftrace; 1647 break; 1648 case PERF_FTRACE_LATENCY: 1649 if (list_empty(&ftrace.filters)) { 1650 pr_err("Should provide a function to measure\n"); 1651 parse_options_usage(ftrace_usage, options, "T", 1); 1652 ret = -EINVAL; 1653 goto out_delete_filters; 1654 } 1655 cmd_func = __cmd_latency; 1656 break; 1657 case PERF_FTRACE_PROFILE: 1658 cmd_func = __cmd_profile; 1659 break; 1660 case PERF_FTRACE_NONE: 1661 default: 1662 pr_err("Invalid subcommand\n"); 1663 ret = -EINVAL; 1664 goto out_delete_filters; 1665 } 1666 1667 ret = target__validate(&ftrace.target); 1668 if (ret) { 1669 char errbuf[512]; 1670 1671 target__strerror(&ftrace.target, ret, errbuf, 512); 1672 pr_err("%s\n", errbuf); 1673 goto out_delete_filters; 1674 } 1675 1676 ftrace.evlist = evlist__new(); 1677 if (ftrace.evlist == NULL) { 1678 ret = -ENOMEM; 1679 goto out_delete_filters; 1680 } 1681 1682 ret = evlist__create_maps(ftrace.evlist, &ftrace.target); 1683 if (ret < 0) 1684 goto out_delete_evlist; 1685 1686 if (argc) { 1687 ret = evlist__prepare_workload(ftrace.evlist, &ftrace.target, 1688 argv, false, 1689 ftrace__workload_exec_failed_signal); 1690 if (ret < 0) 1691 goto out_delete_evlist; 1692 } 1693 1694 ret = cmd_func(&ftrace); 1695 1696 out_delete_evlist: 1697 evlist__delete(ftrace.evlist); 1698 1699 out_delete_filters: 1700 delete_filter_func(&ftrace.filters); 1701 delete_filter_func(&ftrace.notrace); 1702 delete_filter_func(&ftrace.graph_funcs); 1703 delete_filter_func(&ftrace.nograph_funcs); 1704 1705 return ret; 1706 } 1707