1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-ftrace.c 4 * 5 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org> 6 * Copyright (c) 2020 Changbin Du <changbin.du@gmail.com>, significant enhancement. 7 */ 8 9 #include "builtin.h" 10 11 #include <errno.h> 12 #include <unistd.h> 13 #include <signal.h> 14 #include <stdlib.h> 15 #include <fcntl.h> 16 #include <inttypes.h> 17 #include <math.h> 18 #include <poll.h> 19 #include <ctype.h> 20 #include <linux/capability.h> 21 #include <linux/string.h> 22 23 #include "debug.h" 24 #include <subcmd/pager.h> 25 #include <subcmd/parse-options.h> 26 #include <api/io.h> 27 #include <api/fs/tracing_path.h> 28 #include "evlist.h" 29 #include "target.h" 30 #include "cpumap.h" 31 #include "hashmap.h" 32 #include "thread_map.h" 33 #include "strfilter.h" 34 #include "util/cap.h" 35 #include "util/config.h" 36 #include "util/ftrace.h" 37 #include "util/stat.h" 38 #include "util/units.h" 39 #include "util/parse-sublevel-options.h" 40 41 #define DEFAULT_TRACER "function_graph" 42 43 static volatile sig_atomic_t workload_exec_errno; 44 static volatile sig_atomic_t done; 45 46 static void sig_handler(int sig __maybe_unused) 47 { 48 done = true; 49 } 50 51 /* 52 * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since 53 * we asked by setting its exec_error to the function below, 54 * ftrace__workload_exec_failed_signal. 55 * 56 * XXX We need to handle this more appropriately, emitting an error, etc. 57 */ 58 static void ftrace__workload_exec_failed_signal(int signo __maybe_unused, 59 siginfo_t *info __maybe_unused, 60 void *ucontext __maybe_unused) 61 { 62 workload_exec_errno = info->si_value.sival_int; 63 done = true; 64 } 65 66 static bool check_ftrace_capable(void) 67 { 68 bool used_root; 69 70 if (perf_cap__capable(CAP_PERFMON, &used_root)) 71 return true; 72 73 if (!used_root && perf_cap__capable(CAP_SYS_ADMIN, &used_root)) 74 return true; 75 76 pr_err("ftrace only works for %s!\n", 77 used_root ? "root" 78 : "users with the CAP_PERFMON or CAP_SYS_ADMIN capability" 79 ); 80 return false; 81 } 82 83 static bool is_ftrace_supported(void) 84 { 85 char *file; 86 bool supported = false; 87 88 file = get_tracing_file("set_ftrace_pid"); 89 if (!file) { 90 pr_debug("cannot get tracing file set_ftrace_pid\n"); 91 return false; 92 } 93 94 if (!access(file, F_OK)) 95 supported = true; 96 97 put_tracing_file(file); 98 return supported; 99 } 100 101 static int __write_tracing_file(const char *name, const char *val, bool append) 102 { 103 char *file; 104 int fd, ret = -1; 105 ssize_t size = strlen(val); 106 int flags = O_WRONLY; 107 char errbuf[512]; 108 char *val_copy; 109 110 file = get_tracing_file(name); 111 if (!file) { 112 pr_debug("cannot get tracing file: %s\n", name); 113 return -1; 114 } 115 116 if (append) 117 flags |= O_APPEND; 118 else 119 flags |= O_TRUNC; 120 121 fd = open(file, flags); 122 if (fd < 0) { 123 pr_debug("cannot open tracing file: %s: %s\n", 124 name, str_error_r(errno, errbuf, sizeof(errbuf))); 125 goto out; 126 } 127 128 /* 129 * Copy the original value and append a '\n'. Without this, 130 * the kernel can hide possible errors. 131 */ 132 val_copy = strdup(val); 133 if (!val_copy) 134 goto out_close; 135 val_copy[size] = '\n'; 136 137 if (write(fd, val_copy, size + 1) == size + 1) 138 ret = 0; 139 else 140 pr_debug("write '%s' to tracing/%s failed: %s\n", 141 val, name, str_error_r(errno, errbuf, sizeof(errbuf))); 142 143 free(val_copy); 144 out_close: 145 close(fd); 146 out: 147 put_tracing_file(file); 148 return ret; 149 } 150 151 static int write_tracing_file(const char *name, const char *val) 152 { 153 return __write_tracing_file(name, val, false); 154 } 155 156 static int append_tracing_file(const char *name, const char *val) 157 { 158 return __write_tracing_file(name, val, true); 159 } 160 161 static int read_tracing_file_to_stdout(const char *name) 162 { 163 char buf[4096]; 164 char *file; 165 int fd; 166 int ret = -1; 167 168 file = get_tracing_file(name); 169 if (!file) { 170 pr_debug("cannot get tracing file: %s\n", name); 171 return -1; 172 } 173 174 fd = open(file, O_RDONLY); 175 if (fd < 0) { 176 pr_debug("cannot open tracing file: %s: %s\n", 177 name, str_error_r(errno, buf, sizeof(buf))); 178 goto out; 179 } 180 181 /* read contents to stdout */ 182 while (true) { 183 int n = read(fd, buf, sizeof(buf)); 184 if (n == 0) 185 break; 186 else if (n < 0) 187 goto out_close; 188 189 if (fwrite(buf, n, 1, stdout) != 1) 190 goto out_close; 191 } 192 ret = 0; 193 194 out_close: 195 close(fd); 196 out: 197 put_tracing_file(file); 198 return ret; 199 } 200 201 static int read_tracing_file_by_line(const char *name, 202 void (*cb)(char *str, void *arg), 203 void *cb_arg) 204 { 205 char *line = NULL; 206 size_t len = 0; 207 char *file; 208 FILE *fp; 209 210 file = get_tracing_file(name); 211 if (!file) { 212 pr_debug("cannot get tracing file: %s\n", name); 213 return -1; 214 } 215 216 fp = fopen(file, "r"); 217 if (fp == NULL) { 218 pr_debug("cannot open tracing file: %s\n", name); 219 put_tracing_file(file); 220 return -1; 221 } 222 223 while (getline(&line, &len, fp) != -1) { 224 cb(line, cb_arg); 225 } 226 227 if (line) 228 free(line); 229 230 fclose(fp); 231 put_tracing_file(file); 232 return 0; 233 } 234 235 static int write_tracing_file_int(const char *name, int value) 236 { 237 char buf[16]; 238 239 snprintf(buf, sizeof(buf), "%d", value); 240 if (write_tracing_file(name, buf) < 0) 241 return -1; 242 243 return 0; 244 } 245 246 static int write_tracing_option_file(const char *name, const char *val) 247 { 248 char *file; 249 int ret; 250 251 if (asprintf(&file, "options/%s", name) < 0) 252 return -1; 253 254 ret = __write_tracing_file(file, val, false); 255 free(file); 256 return ret; 257 } 258 259 static int reset_tracing_cpu(void); 260 static void reset_tracing_filters(void); 261 262 static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused) 263 { 264 write_tracing_option_file("function-fork", "0"); 265 write_tracing_option_file("func_stack_trace", "0"); 266 write_tracing_option_file("sleep-time", "1"); 267 write_tracing_option_file("funcgraph-irqs", "1"); 268 write_tracing_option_file("funcgraph-proc", "0"); 269 write_tracing_option_file("funcgraph-abstime", "0"); 270 write_tracing_option_file("funcgraph-tail", "0"); 271 write_tracing_option_file("latency-format", "0"); 272 write_tracing_option_file("irq-info", "0"); 273 } 274 275 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused) 276 { 277 if (write_tracing_file("tracing_on", "0") < 0) 278 return -1; 279 280 if (write_tracing_file("current_tracer", "nop") < 0) 281 return -1; 282 283 if (write_tracing_file("set_ftrace_pid", " ") < 0) 284 return -1; 285 286 if (reset_tracing_cpu() < 0) 287 return -1; 288 289 if (write_tracing_file("max_graph_depth", "0") < 0) 290 return -1; 291 292 if (write_tracing_file("tracing_thresh", "0") < 0) 293 return -1; 294 295 reset_tracing_filters(); 296 reset_tracing_options(ftrace); 297 return 0; 298 } 299 300 static int set_tracing_pid(struct perf_ftrace *ftrace) 301 { 302 int i; 303 char buf[16]; 304 305 if (target__has_cpu(&ftrace->target)) 306 return 0; 307 308 for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) { 309 scnprintf(buf, sizeof(buf), "%d", 310 perf_thread_map__pid(ftrace->evlist->core.threads, i)); 311 if (append_tracing_file("set_ftrace_pid", buf) < 0) 312 return -1; 313 } 314 return 0; 315 } 316 317 static int set_tracing_cpumask(struct perf_cpu_map *cpumap) 318 { 319 char *cpumask; 320 size_t mask_size; 321 int ret; 322 int last_cpu; 323 324 last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu; 325 mask_size = last_cpu / 4 + 2; /* one more byte for EOS */ 326 mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */ 327 328 cpumask = malloc(mask_size); 329 if (cpumask == NULL) { 330 pr_debug("failed to allocate cpu mask\n"); 331 return -1; 332 } 333 334 cpu_map__snprint_mask(cpumap, cpumask, mask_size); 335 336 ret = write_tracing_file("tracing_cpumask", cpumask); 337 338 free(cpumask); 339 return ret; 340 } 341 342 static int set_tracing_cpu(struct perf_ftrace *ftrace) 343 { 344 struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus; 345 346 if (!target__has_cpu(&ftrace->target)) 347 return 0; 348 349 return set_tracing_cpumask(cpumap); 350 } 351 352 static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace) 353 { 354 if (!ftrace->func_stack_trace) 355 return 0; 356 357 if (write_tracing_option_file("func_stack_trace", "1") < 0) 358 return -1; 359 360 return 0; 361 } 362 363 static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace) 364 { 365 if (!ftrace->func_irq_info) 366 return 0; 367 368 if (write_tracing_option_file("irq-info", "1") < 0) 369 return -1; 370 371 return 0; 372 } 373 374 static int reset_tracing_cpu(void) 375 { 376 struct perf_cpu_map *cpumap = perf_cpu_map__new_online_cpus(); 377 int ret; 378 379 ret = set_tracing_cpumask(cpumap); 380 perf_cpu_map__put(cpumap); 381 return ret; 382 } 383 384 static int __set_tracing_filter(const char *filter_file, struct list_head *funcs) 385 { 386 struct filter_entry *pos; 387 388 list_for_each_entry(pos, funcs, list) { 389 if (append_tracing_file(filter_file, pos->name) < 0) 390 return -1; 391 } 392 393 return 0; 394 } 395 396 static int set_tracing_filters(struct perf_ftrace *ftrace) 397 { 398 int ret; 399 400 ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters); 401 if (ret < 0) 402 return ret; 403 404 ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace); 405 if (ret < 0) 406 return ret; 407 408 ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs); 409 if (ret < 0) 410 return ret; 411 412 /* old kernels do not have this filter */ 413 __set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs); 414 415 return ret; 416 } 417 418 static void reset_tracing_filters(void) 419 { 420 write_tracing_file("set_ftrace_filter", " "); 421 write_tracing_file("set_ftrace_notrace", " "); 422 write_tracing_file("set_graph_function", " "); 423 write_tracing_file("set_graph_notrace", " "); 424 } 425 426 static int set_tracing_depth(struct perf_ftrace *ftrace) 427 { 428 if (ftrace->graph_depth == 0) 429 return 0; 430 431 if (ftrace->graph_depth < 0) { 432 pr_err("invalid graph depth: %d\n", ftrace->graph_depth); 433 return -1; 434 } 435 436 if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0) 437 return -1; 438 439 return 0; 440 } 441 442 static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace) 443 { 444 int ret; 445 446 if (ftrace->percpu_buffer_size == 0) 447 return 0; 448 449 ret = write_tracing_file_int("buffer_size_kb", 450 ftrace->percpu_buffer_size / 1024); 451 if (ret < 0) 452 return ret; 453 454 return 0; 455 } 456 457 static int set_tracing_trace_inherit(struct perf_ftrace *ftrace) 458 { 459 if (!ftrace->inherit) 460 return 0; 461 462 if (write_tracing_option_file("function-fork", "1") < 0) 463 return -1; 464 465 return 0; 466 } 467 468 static int set_tracing_sleep_time(struct perf_ftrace *ftrace) 469 { 470 if (!ftrace->graph_nosleep_time) 471 return 0; 472 473 if (write_tracing_option_file("sleep-time", "0") < 0) 474 return -1; 475 476 return 0; 477 } 478 479 static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace) 480 { 481 if (!ftrace->graph_noirqs) 482 return 0; 483 484 if (write_tracing_option_file("funcgraph-irqs", "0") < 0) 485 return -1; 486 487 return 0; 488 } 489 490 static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace) 491 { 492 if (!ftrace->graph_verbose) 493 return 0; 494 495 if (write_tracing_option_file("funcgraph-proc", "1") < 0) 496 return -1; 497 498 if (write_tracing_option_file("funcgraph-abstime", "1") < 0) 499 return -1; 500 501 if (write_tracing_option_file("latency-format", "1") < 0) 502 return -1; 503 504 return 0; 505 } 506 507 static int set_tracing_funcgraph_tail(struct perf_ftrace *ftrace) 508 { 509 if (!ftrace->graph_tail) 510 return 0; 511 512 if (write_tracing_option_file("funcgraph-tail", "1") < 0) 513 return -1; 514 515 return 0; 516 } 517 518 static int set_tracing_thresh(struct perf_ftrace *ftrace) 519 { 520 int ret; 521 522 if (ftrace->graph_thresh == 0) 523 return 0; 524 525 ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh); 526 if (ret < 0) 527 return ret; 528 529 return 0; 530 } 531 532 static int set_tracing_options(struct perf_ftrace *ftrace) 533 { 534 if (set_tracing_pid(ftrace) < 0) { 535 pr_err("failed to set ftrace pid\n"); 536 return -1; 537 } 538 539 if (set_tracing_cpu(ftrace) < 0) { 540 pr_err("failed to set tracing cpumask\n"); 541 return -1; 542 } 543 544 if (set_tracing_func_stack_trace(ftrace) < 0) { 545 pr_err("failed to set tracing option func_stack_trace\n"); 546 return -1; 547 } 548 549 if (set_tracing_func_irqinfo(ftrace) < 0) { 550 pr_err("failed to set tracing option irq-info\n"); 551 return -1; 552 } 553 554 if (set_tracing_filters(ftrace) < 0) { 555 pr_err("failed to set tracing filters\n"); 556 return -1; 557 } 558 559 if (set_tracing_depth(ftrace) < 0) { 560 pr_err("failed to set graph depth\n"); 561 return -1; 562 } 563 564 if (set_tracing_percpu_buffer_size(ftrace) < 0) { 565 pr_err("failed to set tracing per-cpu buffer size\n"); 566 return -1; 567 } 568 569 if (set_tracing_trace_inherit(ftrace) < 0) { 570 pr_err("failed to set tracing option function-fork\n"); 571 return -1; 572 } 573 574 if (set_tracing_sleep_time(ftrace) < 0) { 575 pr_err("failed to set tracing option sleep-time\n"); 576 return -1; 577 } 578 579 if (set_tracing_funcgraph_irqs(ftrace) < 0) { 580 pr_err("failed to set tracing option funcgraph-irqs\n"); 581 return -1; 582 } 583 584 if (set_tracing_funcgraph_verbose(ftrace) < 0) { 585 pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n"); 586 return -1; 587 } 588 589 if (set_tracing_thresh(ftrace) < 0) { 590 pr_err("failed to set tracing thresh\n"); 591 return -1; 592 } 593 594 if (set_tracing_funcgraph_tail(ftrace) < 0) { 595 pr_err("failed to set tracing option funcgraph-tail\n"); 596 return -1; 597 } 598 599 return 0; 600 } 601 602 static void select_tracer(struct perf_ftrace *ftrace) 603 { 604 bool graph = !list_empty(&ftrace->graph_funcs) || 605 !list_empty(&ftrace->nograph_funcs); 606 bool func = !list_empty(&ftrace->filters) || 607 !list_empty(&ftrace->notrace); 608 609 /* The function_graph has priority over function tracer. */ 610 if (graph) 611 ftrace->tracer = "function_graph"; 612 else if (func) 613 ftrace->tracer = "function"; 614 /* Otherwise, the default tracer is used. */ 615 616 pr_debug("%s tracer is used\n", ftrace->tracer); 617 } 618 619 static int __cmd_ftrace(struct perf_ftrace *ftrace) 620 { 621 char *trace_file; 622 int trace_fd; 623 char buf[4096]; 624 struct pollfd pollfd = { 625 .events = POLLIN, 626 }; 627 628 select_tracer(ftrace); 629 630 if (reset_tracing_files(ftrace) < 0) { 631 pr_err("failed to reset ftrace\n"); 632 goto out; 633 } 634 635 /* reset ftrace buffer */ 636 if (write_tracing_file("trace", "0") < 0) 637 goto out; 638 639 if (set_tracing_options(ftrace) < 0) 640 goto out_reset; 641 642 if (write_tracing_file("current_tracer", ftrace->tracer) < 0) { 643 pr_err("failed to set current_tracer to %s\n", ftrace->tracer); 644 goto out_reset; 645 } 646 647 setup_pager(); 648 649 trace_file = get_tracing_file("trace_pipe"); 650 if (!trace_file) { 651 pr_err("failed to open trace_pipe\n"); 652 goto out_reset; 653 } 654 655 trace_fd = open(trace_file, O_RDONLY); 656 657 put_tracing_file(trace_file); 658 659 if (trace_fd < 0) { 660 pr_err("failed to open trace_pipe\n"); 661 goto out_reset; 662 } 663 664 fcntl(trace_fd, F_SETFL, O_NONBLOCK); 665 pollfd.fd = trace_fd; 666 667 /* display column headers */ 668 read_tracing_file_to_stdout("trace"); 669 670 if (!ftrace->target.initial_delay) { 671 if (write_tracing_file("tracing_on", "1") < 0) { 672 pr_err("can't enable tracing\n"); 673 goto out_close_fd; 674 } 675 } 676 677 evlist__start_workload(ftrace->evlist); 678 679 if (ftrace->target.initial_delay > 0) { 680 usleep(ftrace->target.initial_delay * 1000); 681 if (write_tracing_file("tracing_on", "1") < 0) { 682 pr_err("can't enable tracing\n"); 683 goto out_close_fd; 684 } 685 } 686 687 while (!done) { 688 if (poll(&pollfd, 1, -1) < 0) 689 break; 690 691 if (pollfd.revents & POLLIN) { 692 int n = read(trace_fd, buf, sizeof(buf)); 693 if (n < 0) 694 break; 695 if (fwrite(buf, n, 1, stdout) != 1) 696 break; 697 /* flush output since stdout is in full buffering mode due to pager */ 698 fflush(stdout); 699 } 700 } 701 702 write_tracing_file("tracing_on", "0"); 703 704 if (workload_exec_errno) { 705 const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); 706 /* flush stdout first so below error msg appears at the end. */ 707 fflush(stdout); 708 pr_err("workload failed: %s\n", emsg); 709 goto out_close_fd; 710 } 711 712 /* read remaining buffer contents */ 713 while (true) { 714 int n = read(trace_fd, buf, sizeof(buf)); 715 if (n <= 0) 716 break; 717 if (fwrite(buf, n, 1, stdout) != 1) 718 break; 719 } 720 721 out_close_fd: 722 close(trace_fd); 723 out_reset: 724 reset_tracing_files(ftrace); 725 out: 726 return (done && !workload_exec_errno) ? 0 : -1; 727 } 728 729 static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf, 730 bool use_nsec) 731 { 732 char *p, *q; 733 char *unit; 734 double num; 735 int i; 736 737 /* ensure NUL termination */ 738 buf[len] = '\0'; 739 740 /* handle data line by line */ 741 for (p = buf; (q = strchr(p, '\n')) != NULL; p = q + 1) { 742 *q = '\0'; 743 /* move it to the line buffer */ 744 strcat(linebuf, p); 745 746 /* 747 * parse trace output to get function duration like in 748 * 749 * # tracer: function_graph 750 * # 751 * # CPU DURATION FUNCTION CALLS 752 * # | | | | | | | 753 * 1) + 10.291 us | do_filp_open(); 754 * 1) 4.889 us | do_filp_open(); 755 * 1) 6.086 us | do_filp_open(); 756 * 757 */ 758 if (linebuf[0] == '#') 759 goto next; 760 761 /* ignore CPU */ 762 p = strchr(linebuf, ')'); 763 if (p == NULL) 764 p = linebuf; 765 766 while (*p && !isdigit(*p) && (*p != '|')) 767 p++; 768 769 /* no duration */ 770 if (*p == '\0' || *p == '|') 771 goto next; 772 773 num = strtod(p, &unit); 774 if (!unit || strncmp(unit, " us", 3)) 775 goto next; 776 777 if (use_nsec) 778 num *= 1000; 779 780 i = log2(num); 781 if (i < 0) 782 i = 0; 783 if (i >= NUM_BUCKET) 784 i = NUM_BUCKET - 1; 785 786 buckets[i]++; 787 788 next: 789 /* empty the line buffer for the next output */ 790 linebuf[0] = '\0'; 791 } 792 793 /* preserve any remaining output (before newline) */ 794 strcat(linebuf, p); 795 } 796 797 static void display_histogram(int buckets[], bool use_nsec) 798 { 799 int i; 800 int total = 0; 801 int bar_total = 46; /* to fit in 80 column */ 802 char bar[] = "###############################################"; 803 int bar_len; 804 805 for (i = 0; i < NUM_BUCKET; i++) 806 total += buckets[i]; 807 808 if (total == 0) { 809 printf("No data found\n"); 810 return; 811 } 812 813 printf("# %14s | %10s | %-*s |\n", 814 " DURATION ", "COUNT", bar_total, "GRAPH"); 815 816 bar_len = buckets[0] * bar_total / total; 817 printf(" %4d - %-4d %s | %10d | %.*s%*s |\n", 818 0, 1, use_nsec ? "ns" : "us", buckets[0], bar_len, bar, bar_total - bar_len, ""); 819 820 for (i = 1; i < NUM_BUCKET - 1; i++) { 821 int start = (1 << (i - 1)); 822 int stop = 1 << i; 823 const char *unit = use_nsec ? "ns" : "us"; 824 825 if (start >= 1024) { 826 start >>= 10; 827 stop >>= 10; 828 unit = use_nsec ? "us" : "ms"; 829 } 830 bar_len = buckets[i] * bar_total / total; 831 printf(" %4d - %-4d %s | %10d | %.*s%*s |\n", 832 start, stop, unit, buckets[i], bar_len, bar, 833 bar_total - bar_len, ""); 834 } 835 836 bar_len = buckets[NUM_BUCKET - 1] * bar_total / total; 837 printf(" %4d - %-4s %s | %10d | %.*s%*s |\n", 838 1, "...", use_nsec ? "ms" : " s", buckets[NUM_BUCKET - 1], 839 bar_len, bar, bar_total - bar_len, ""); 840 841 } 842 843 static int prepare_func_latency(struct perf_ftrace *ftrace) 844 { 845 char *trace_file; 846 int fd; 847 848 if (ftrace->target.use_bpf) 849 return perf_ftrace__latency_prepare_bpf(ftrace); 850 851 if (reset_tracing_files(ftrace) < 0) { 852 pr_err("failed to reset ftrace\n"); 853 return -1; 854 } 855 856 /* reset ftrace buffer */ 857 if (write_tracing_file("trace", "0") < 0) 858 return -1; 859 860 if (set_tracing_options(ftrace) < 0) 861 return -1; 862 863 /* force to use the function_graph tracer to track duration */ 864 if (write_tracing_file("current_tracer", "function_graph") < 0) { 865 pr_err("failed to set current_tracer to function_graph\n"); 866 return -1; 867 } 868 869 trace_file = get_tracing_file("trace_pipe"); 870 if (!trace_file) { 871 pr_err("failed to open trace_pipe\n"); 872 return -1; 873 } 874 875 fd = open(trace_file, O_RDONLY); 876 if (fd < 0) 877 pr_err("failed to open trace_pipe\n"); 878 879 put_tracing_file(trace_file); 880 return fd; 881 } 882 883 static int start_func_latency(struct perf_ftrace *ftrace) 884 { 885 if (ftrace->target.use_bpf) 886 return perf_ftrace__latency_start_bpf(ftrace); 887 888 if (write_tracing_file("tracing_on", "1") < 0) { 889 pr_err("can't enable tracing\n"); 890 return -1; 891 } 892 893 return 0; 894 } 895 896 static int stop_func_latency(struct perf_ftrace *ftrace) 897 { 898 if (ftrace->target.use_bpf) 899 return perf_ftrace__latency_stop_bpf(ftrace); 900 901 write_tracing_file("tracing_on", "0"); 902 return 0; 903 } 904 905 static int read_func_latency(struct perf_ftrace *ftrace, int buckets[]) 906 { 907 if (ftrace->target.use_bpf) 908 return perf_ftrace__latency_read_bpf(ftrace, buckets); 909 910 return 0; 911 } 912 913 static int cleanup_func_latency(struct perf_ftrace *ftrace) 914 { 915 if (ftrace->target.use_bpf) 916 return perf_ftrace__latency_cleanup_bpf(ftrace); 917 918 reset_tracing_files(ftrace); 919 return 0; 920 } 921 922 static int __cmd_latency(struct perf_ftrace *ftrace) 923 { 924 int trace_fd; 925 char buf[4096]; 926 char line[256]; 927 struct pollfd pollfd = { 928 .events = POLLIN, 929 }; 930 int buckets[NUM_BUCKET] = { }; 931 932 trace_fd = prepare_func_latency(ftrace); 933 if (trace_fd < 0) 934 goto out; 935 936 fcntl(trace_fd, F_SETFL, O_NONBLOCK); 937 pollfd.fd = trace_fd; 938 939 if (start_func_latency(ftrace) < 0) 940 goto out; 941 942 evlist__start_workload(ftrace->evlist); 943 944 line[0] = '\0'; 945 while (!done) { 946 if (poll(&pollfd, 1, -1) < 0) 947 break; 948 949 if (pollfd.revents & POLLIN) { 950 int n = read(trace_fd, buf, sizeof(buf) - 1); 951 if (n < 0) 952 break; 953 954 make_histogram(buckets, buf, n, line, ftrace->use_nsec); 955 } 956 } 957 958 stop_func_latency(ftrace); 959 960 if (workload_exec_errno) { 961 const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); 962 pr_err("workload failed: %s\n", emsg); 963 goto out; 964 } 965 966 /* read remaining buffer contents */ 967 while (!ftrace->target.use_bpf) { 968 int n = read(trace_fd, buf, sizeof(buf) - 1); 969 if (n <= 0) 970 break; 971 make_histogram(buckets, buf, n, line, ftrace->use_nsec); 972 } 973 974 read_func_latency(ftrace, buckets); 975 976 display_histogram(buckets, ftrace->use_nsec); 977 978 out: 979 close(trace_fd); 980 cleanup_func_latency(ftrace); 981 982 return (done && !workload_exec_errno) ? 0 : -1; 983 } 984 985 static size_t profile_hash(long func, void *ctx __maybe_unused) 986 { 987 return str_hash((char *)func); 988 } 989 990 static bool profile_equal(long func1, long func2, void *ctx __maybe_unused) 991 { 992 return !strcmp((char *)func1, (char *)func2); 993 } 994 995 static int prepare_func_profile(struct perf_ftrace *ftrace) 996 { 997 ftrace->tracer = "function_graph"; 998 ftrace->graph_tail = 1; 999 1000 ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL); 1001 if (ftrace->profile_hash == NULL) 1002 return -ENOMEM; 1003 1004 return 0; 1005 } 1006 1007 /* This is saved in a hashmap keyed by the function name */ 1008 struct ftrace_profile_data { 1009 struct stats st; 1010 }; 1011 1012 static int add_func_duration(struct perf_ftrace *ftrace, char *func, double time_ns) 1013 { 1014 struct ftrace_profile_data *prof = NULL; 1015 1016 if (!hashmap__find(ftrace->profile_hash, func, &prof)) { 1017 char *key = strdup(func); 1018 1019 if (key == NULL) 1020 return -ENOMEM; 1021 1022 prof = zalloc(sizeof(*prof)); 1023 if (prof == NULL) { 1024 free(key); 1025 return -ENOMEM; 1026 } 1027 1028 init_stats(&prof->st); 1029 hashmap__add(ftrace->profile_hash, key, prof); 1030 } 1031 1032 update_stats(&prof->st, time_ns); 1033 return 0; 1034 } 1035 1036 /* 1037 * The ftrace function_graph text output normally looks like below: 1038 * 1039 * CPU DURATION FUNCTION 1040 * 1041 * 0) | syscall_trace_enter.isra.0() { 1042 * 0) | __audit_syscall_entry() { 1043 * 0) | auditd_test_task() { 1044 * 0) 0.271 us | __rcu_read_lock(); 1045 * 0) 0.275 us | __rcu_read_unlock(); 1046 * 0) 1.254 us | } /\* auditd_test_task *\/ 1047 * 0) 0.279 us | ktime_get_coarse_real_ts64(); 1048 * 0) 2.227 us | } /\* __audit_syscall_entry *\/ 1049 * 0) 2.713 us | } /\* syscall_trace_enter.isra.0 *\/ 1050 * 1051 * Parse the line and get the duration and function name. 1052 */ 1053 static int parse_func_duration(struct perf_ftrace *ftrace, char *line, size_t len) 1054 { 1055 char *p; 1056 char *func; 1057 double duration; 1058 1059 /* skip CPU */ 1060 p = strchr(line, ')'); 1061 if (p == NULL) 1062 return 0; 1063 1064 /* get duration */ 1065 p = skip_spaces(p + 1); 1066 1067 /* no duration? */ 1068 if (p == NULL || *p == '|') 1069 return 0; 1070 1071 /* skip markers like '*' or '!' for longer than ms */ 1072 if (!isdigit(*p)) 1073 p++; 1074 1075 duration = strtod(p, &p); 1076 1077 if (strncmp(p, " us", 3)) { 1078 pr_debug("non-usec time found.. ignoring\n"); 1079 return 0; 1080 } 1081 1082 /* 1083 * profile stat keeps the max and min values as integer, 1084 * convert to nsec time so that we can have accurate max. 1085 */ 1086 duration *= 1000; 1087 1088 /* skip to the pipe */ 1089 while (p < line + len && *p != '|') 1090 p++; 1091 1092 if (*p++ != '|') 1093 return -EINVAL; 1094 1095 /* get function name */ 1096 func = skip_spaces(p); 1097 1098 /* skip the closing bracket and the start of comment */ 1099 if (*func == '}') 1100 func += 5; 1101 1102 /* remove semi-colon or end of comment at the end */ 1103 p = line + len - 1; 1104 while (!isalnum(*p) && *p != ']') { 1105 *p = '\0'; 1106 --p; 1107 } 1108 1109 return add_func_duration(ftrace, func, duration); 1110 } 1111 1112 enum perf_ftrace_profile_sort_key { 1113 PFP_SORT_TOTAL = 0, 1114 PFP_SORT_AVG, 1115 PFP_SORT_MAX, 1116 PFP_SORT_COUNT, 1117 PFP_SORT_NAME, 1118 }; 1119 1120 static enum perf_ftrace_profile_sort_key profile_sort = PFP_SORT_TOTAL; 1121 1122 static int cmp_profile_data(const void *a, const void *b) 1123 { 1124 const struct hashmap_entry *e1 = *(const struct hashmap_entry **)a; 1125 const struct hashmap_entry *e2 = *(const struct hashmap_entry **)b; 1126 struct ftrace_profile_data *p1 = e1->pvalue; 1127 struct ftrace_profile_data *p2 = e2->pvalue; 1128 double v1, v2; 1129 1130 switch (profile_sort) { 1131 case PFP_SORT_NAME: 1132 return strcmp(e1->pkey, e2->pkey); 1133 case PFP_SORT_AVG: 1134 v1 = p1->st.mean; 1135 v2 = p2->st.mean; 1136 break; 1137 case PFP_SORT_MAX: 1138 v1 = p1->st.max; 1139 v2 = p2->st.max; 1140 break; 1141 case PFP_SORT_COUNT: 1142 v1 = p1->st.n; 1143 v2 = p2->st.n; 1144 break; 1145 case PFP_SORT_TOTAL: 1146 default: 1147 v1 = p1->st.n * p1->st.mean; 1148 v2 = p2->st.n * p2->st.mean; 1149 break; 1150 } 1151 1152 if (v1 > v2) 1153 return -1; 1154 if (v1 < v2) 1155 return 1; 1156 return 0; 1157 } 1158 1159 static void print_profile_result(struct perf_ftrace *ftrace) 1160 { 1161 struct hashmap_entry *entry, **profile; 1162 size_t i, nr, bkt; 1163 1164 nr = hashmap__size(ftrace->profile_hash); 1165 if (nr == 0) 1166 return; 1167 1168 profile = calloc(nr, sizeof(*profile)); 1169 if (profile == NULL) { 1170 pr_err("failed to allocate memory for the result\n"); 1171 return; 1172 } 1173 1174 i = 0; 1175 hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) 1176 profile[i++] = entry; 1177 1178 assert(i == nr); 1179 1180 //cmp_profile_data(profile[0], profile[1]); 1181 qsort(profile, nr, sizeof(*profile), cmp_profile_data); 1182 1183 printf("# %10s %10s %10s %10s %s\n", 1184 "Total (us)", "Avg (us)", "Max (us)", "Count", "Function"); 1185 1186 for (i = 0; i < nr; i++) { 1187 const char *name = profile[i]->pkey; 1188 struct ftrace_profile_data *p = profile[i]->pvalue; 1189 1190 printf("%12.3f %10.3f %6"PRIu64".%03"PRIu64" %10.0f %s\n", 1191 p->st.n * p->st.mean / 1000, p->st.mean / 1000, 1192 p->st.max / 1000, p->st.max % 1000, p->st.n, name); 1193 } 1194 1195 free(profile); 1196 1197 hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) { 1198 free((char *)entry->pkey); 1199 free(entry->pvalue); 1200 } 1201 1202 hashmap__free(ftrace->profile_hash); 1203 ftrace->profile_hash = NULL; 1204 } 1205 1206 static int __cmd_profile(struct perf_ftrace *ftrace) 1207 { 1208 char *trace_file; 1209 int trace_fd; 1210 char buf[4096]; 1211 struct io io; 1212 char *line = NULL; 1213 size_t line_len = 0; 1214 1215 if (prepare_func_profile(ftrace) < 0) { 1216 pr_err("failed to prepare func profiler\n"); 1217 goto out; 1218 } 1219 1220 if (reset_tracing_files(ftrace) < 0) { 1221 pr_err("failed to reset ftrace\n"); 1222 goto out; 1223 } 1224 1225 /* reset ftrace buffer */ 1226 if (write_tracing_file("trace", "0") < 0) 1227 goto out; 1228 1229 if (set_tracing_options(ftrace) < 0) 1230 return -1; 1231 1232 if (write_tracing_file("current_tracer", ftrace->tracer) < 0) { 1233 pr_err("failed to set current_tracer to %s\n", ftrace->tracer); 1234 goto out_reset; 1235 } 1236 1237 setup_pager(); 1238 1239 trace_file = get_tracing_file("trace_pipe"); 1240 if (!trace_file) { 1241 pr_err("failed to open trace_pipe\n"); 1242 goto out_reset; 1243 } 1244 1245 trace_fd = open(trace_file, O_RDONLY); 1246 1247 put_tracing_file(trace_file); 1248 1249 if (trace_fd < 0) { 1250 pr_err("failed to open trace_pipe\n"); 1251 goto out_reset; 1252 } 1253 1254 fcntl(trace_fd, F_SETFL, O_NONBLOCK); 1255 1256 if (write_tracing_file("tracing_on", "1") < 0) { 1257 pr_err("can't enable tracing\n"); 1258 goto out_close_fd; 1259 } 1260 1261 evlist__start_workload(ftrace->evlist); 1262 1263 io__init(&io, trace_fd, buf, sizeof(buf)); 1264 io.timeout_ms = -1; 1265 1266 while (!done && !io.eof) { 1267 if (io__getline(&io, &line, &line_len) < 0) 1268 break; 1269 1270 if (parse_func_duration(ftrace, line, line_len) < 0) 1271 break; 1272 } 1273 1274 write_tracing_file("tracing_on", "0"); 1275 1276 if (workload_exec_errno) { 1277 const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); 1278 /* flush stdout first so below error msg appears at the end. */ 1279 fflush(stdout); 1280 pr_err("workload failed: %s\n", emsg); 1281 goto out_free_line; 1282 } 1283 1284 /* read remaining buffer contents */ 1285 io.timeout_ms = 0; 1286 while (!io.eof) { 1287 if (io__getline(&io, &line, &line_len) < 0) 1288 break; 1289 1290 if (parse_func_duration(ftrace, line, line_len) < 0) 1291 break; 1292 } 1293 1294 print_profile_result(ftrace); 1295 1296 out_free_line: 1297 free(line); 1298 out_close_fd: 1299 close(trace_fd); 1300 out_reset: 1301 reset_tracing_files(ftrace); 1302 out: 1303 return (done && !workload_exec_errno) ? 0 : -1; 1304 } 1305 1306 static int perf_ftrace_config(const char *var, const char *value, void *cb) 1307 { 1308 struct perf_ftrace *ftrace = cb; 1309 1310 if (!strstarts(var, "ftrace.")) 1311 return 0; 1312 1313 if (strcmp(var, "ftrace.tracer")) 1314 return -1; 1315 1316 if (!strcmp(value, "function_graph") || 1317 !strcmp(value, "function")) { 1318 ftrace->tracer = value; 1319 return 0; 1320 } 1321 1322 pr_err("Please select \"function_graph\" (default) or \"function\"\n"); 1323 return -1; 1324 } 1325 1326 static void list_function_cb(char *str, void *arg) 1327 { 1328 struct strfilter *filter = (struct strfilter *)arg; 1329 1330 if (strfilter__compare(filter, str)) 1331 printf("%s", str); 1332 } 1333 1334 static int opt_list_avail_functions(const struct option *opt __maybe_unused, 1335 const char *str, int unset) 1336 { 1337 struct strfilter *filter; 1338 const char *err = NULL; 1339 int ret; 1340 1341 if (unset || !str) 1342 return -1; 1343 1344 filter = strfilter__new(str, &err); 1345 if (!filter) 1346 return err ? -EINVAL : -ENOMEM; 1347 1348 ret = strfilter__or(filter, str, &err); 1349 if (ret == -EINVAL) { 1350 pr_err("Filter parse error at %td.\n", err - str + 1); 1351 pr_err("Source: \"%s\"\n", str); 1352 pr_err(" %*c\n", (int)(err - str + 1), '^'); 1353 strfilter__delete(filter); 1354 return ret; 1355 } 1356 1357 ret = read_tracing_file_by_line("available_filter_functions", 1358 list_function_cb, filter); 1359 strfilter__delete(filter); 1360 if (ret < 0) 1361 return ret; 1362 1363 exit(0); 1364 } 1365 1366 static int parse_filter_func(const struct option *opt, const char *str, 1367 int unset __maybe_unused) 1368 { 1369 struct list_head *head = opt->value; 1370 struct filter_entry *entry; 1371 1372 entry = malloc(sizeof(*entry) + strlen(str) + 1); 1373 if (entry == NULL) 1374 return -ENOMEM; 1375 1376 strcpy(entry->name, str); 1377 list_add_tail(&entry->list, head); 1378 1379 return 0; 1380 } 1381 1382 static void delete_filter_func(struct list_head *head) 1383 { 1384 struct filter_entry *pos, *tmp; 1385 1386 list_for_each_entry_safe(pos, tmp, head, list) { 1387 list_del_init(&pos->list); 1388 free(pos); 1389 } 1390 } 1391 1392 static int parse_buffer_size(const struct option *opt, 1393 const char *str, int unset) 1394 { 1395 unsigned long *s = (unsigned long *)opt->value; 1396 static struct parse_tag tags_size[] = { 1397 { .tag = 'B', .mult = 1 }, 1398 { .tag = 'K', .mult = 1 << 10 }, 1399 { .tag = 'M', .mult = 1 << 20 }, 1400 { .tag = 'G', .mult = 1 << 30 }, 1401 { .tag = 0 }, 1402 }; 1403 unsigned long val; 1404 1405 if (unset) { 1406 *s = 0; 1407 return 0; 1408 } 1409 1410 val = parse_tag_value(str, tags_size); 1411 if (val != (unsigned long) -1) { 1412 if (val < 1024) { 1413 pr_err("buffer size too small, must larger than 1KB."); 1414 return -1; 1415 } 1416 *s = val; 1417 return 0; 1418 } 1419 1420 return -1; 1421 } 1422 1423 static int parse_func_tracer_opts(const struct option *opt, 1424 const char *str, int unset) 1425 { 1426 int ret; 1427 struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value; 1428 struct sublevel_option func_tracer_opts[] = { 1429 { .name = "call-graph", .value_ptr = &ftrace->func_stack_trace }, 1430 { .name = "irq-info", .value_ptr = &ftrace->func_irq_info }, 1431 { .name = NULL, } 1432 }; 1433 1434 if (unset) 1435 return 0; 1436 1437 ret = perf_parse_sublevel_options(str, func_tracer_opts); 1438 if (ret) 1439 return ret; 1440 1441 return 0; 1442 } 1443 1444 static int parse_graph_tracer_opts(const struct option *opt, 1445 const char *str, int unset) 1446 { 1447 int ret; 1448 struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value; 1449 struct sublevel_option graph_tracer_opts[] = { 1450 { .name = "nosleep-time", .value_ptr = &ftrace->graph_nosleep_time }, 1451 { .name = "noirqs", .value_ptr = &ftrace->graph_noirqs }, 1452 { .name = "verbose", .value_ptr = &ftrace->graph_verbose }, 1453 { .name = "thresh", .value_ptr = &ftrace->graph_thresh }, 1454 { .name = "depth", .value_ptr = &ftrace->graph_depth }, 1455 { .name = "tail", .value_ptr = &ftrace->graph_tail }, 1456 { .name = NULL, } 1457 }; 1458 1459 if (unset) 1460 return 0; 1461 1462 ret = perf_parse_sublevel_options(str, graph_tracer_opts); 1463 if (ret) 1464 return ret; 1465 1466 return 0; 1467 } 1468 1469 static int parse_sort_key(const struct option *opt, const char *str, int unset) 1470 { 1471 enum perf_ftrace_profile_sort_key *key = (void *)opt->value; 1472 1473 if (unset) 1474 return 0; 1475 1476 if (!strcmp(str, "total")) 1477 *key = PFP_SORT_TOTAL; 1478 else if (!strcmp(str, "avg")) 1479 *key = PFP_SORT_AVG; 1480 else if (!strcmp(str, "max")) 1481 *key = PFP_SORT_MAX; 1482 else if (!strcmp(str, "count")) 1483 *key = PFP_SORT_COUNT; 1484 else if (!strcmp(str, "name")) 1485 *key = PFP_SORT_NAME; 1486 else { 1487 pr_err("Unknown sort key: %s\n", str); 1488 return -1; 1489 } 1490 return 0; 1491 } 1492 1493 enum perf_ftrace_subcommand { 1494 PERF_FTRACE_NONE, 1495 PERF_FTRACE_TRACE, 1496 PERF_FTRACE_LATENCY, 1497 PERF_FTRACE_PROFILE, 1498 }; 1499 1500 int cmd_ftrace(int argc, const char **argv) 1501 { 1502 int ret; 1503 int (*cmd_func)(struct perf_ftrace *) = NULL; 1504 struct perf_ftrace ftrace = { 1505 .tracer = DEFAULT_TRACER, 1506 .target = { .uid = UINT_MAX, }, 1507 }; 1508 const struct option common_options[] = { 1509 OPT_STRING('p', "pid", &ftrace.target.pid, "pid", 1510 "Trace on existing process id"), 1511 /* TODO: Add short option -t after -t/--tracer can be removed. */ 1512 OPT_STRING(0, "tid", &ftrace.target.tid, "tid", 1513 "Trace on existing thread id (exclusive to --pid)"), 1514 OPT_INCR('v', "verbose", &verbose, 1515 "Be more verbose"), 1516 OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide, 1517 "System-wide collection from all CPUs"), 1518 OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu", 1519 "List of cpus to monitor"), 1520 OPT_END() 1521 }; 1522 const struct option ftrace_options[] = { 1523 OPT_STRING('t', "tracer", &ftrace.tracer, "tracer", 1524 "Tracer to use: function_graph(default) or function"), 1525 OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]", 1526 "Show available functions to filter", 1527 opt_list_avail_functions, "*"), 1528 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", 1529 "Trace given functions using function tracer", 1530 parse_filter_func), 1531 OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func", 1532 "Do not trace given functions", parse_filter_func), 1533 OPT_CALLBACK(0, "func-opts", &ftrace, "options", 1534 "Function tracer options, available options: call-graph,irq-info", 1535 parse_func_tracer_opts), 1536 OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func", 1537 "Trace given functions using function_graph tracer", 1538 parse_filter_func), 1539 OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func", 1540 "Set nograph filter on given functions", parse_filter_func), 1541 OPT_CALLBACK(0, "graph-opts", &ftrace, "options", 1542 "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>", 1543 parse_graph_tracer_opts), 1544 OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size", 1545 "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size), 1546 OPT_BOOLEAN(0, "inherit", &ftrace.inherit, 1547 "Trace children processes"), 1548 OPT_INTEGER('D', "delay", &ftrace.target.initial_delay, 1549 "Number of milliseconds to wait before starting tracing after program start"), 1550 OPT_PARENT(common_options), 1551 }; 1552 const struct option latency_options[] = { 1553 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", 1554 "Show latency of given function", parse_filter_func), 1555 #ifdef HAVE_BPF_SKEL 1556 OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf, 1557 "Use BPF to measure function latency"), 1558 #endif 1559 OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec, 1560 "Use nano-second histogram"), 1561 OPT_PARENT(common_options), 1562 }; 1563 const struct option profile_options[] = { 1564 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", 1565 "Trace given functions using function tracer", 1566 parse_filter_func), 1567 OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func", 1568 "Do not trace given functions", parse_filter_func), 1569 OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func", 1570 "Trace given functions using function_graph tracer", 1571 parse_filter_func), 1572 OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func", 1573 "Set nograph filter on given functions", parse_filter_func), 1574 OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size", 1575 "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size), 1576 OPT_CALLBACK('s', "sort", &profile_sort, "key", 1577 "Sort result by key: total (default), avg, max, count, name.", 1578 parse_sort_key), 1579 OPT_PARENT(common_options), 1580 }; 1581 const struct option *options = ftrace_options; 1582 1583 const char * const ftrace_usage[] = { 1584 "perf ftrace [<options>] [<command>]", 1585 "perf ftrace [<options>] -- [<command>] [<options>]", 1586 "perf ftrace {trace|latency|profile} [<options>] [<command>]", 1587 "perf ftrace {trace|latency|profile} [<options>] -- [<command>] [<options>]", 1588 NULL 1589 }; 1590 enum perf_ftrace_subcommand subcmd = PERF_FTRACE_NONE; 1591 1592 INIT_LIST_HEAD(&ftrace.filters); 1593 INIT_LIST_HEAD(&ftrace.notrace); 1594 INIT_LIST_HEAD(&ftrace.graph_funcs); 1595 INIT_LIST_HEAD(&ftrace.nograph_funcs); 1596 1597 signal(SIGINT, sig_handler); 1598 signal(SIGUSR1, sig_handler); 1599 signal(SIGCHLD, sig_handler); 1600 signal(SIGPIPE, sig_handler); 1601 1602 if (!check_ftrace_capable()) 1603 return -1; 1604 1605 if (!is_ftrace_supported()) { 1606 pr_err("ftrace is not supported on this system\n"); 1607 return -ENOTSUP; 1608 } 1609 1610 ret = perf_config(perf_ftrace_config, &ftrace); 1611 if (ret < 0) 1612 return -1; 1613 1614 if (argc > 1) { 1615 if (!strcmp(argv[1], "trace")) { 1616 subcmd = PERF_FTRACE_TRACE; 1617 } else if (!strcmp(argv[1], "latency")) { 1618 subcmd = PERF_FTRACE_LATENCY; 1619 options = latency_options; 1620 } else if (!strcmp(argv[1], "profile")) { 1621 subcmd = PERF_FTRACE_PROFILE; 1622 options = profile_options; 1623 } 1624 1625 if (subcmd != PERF_FTRACE_NONE) { 1626 argc--; 1627 argv++; 1628 } 1629 } 1630 /* for backward compatibility */ 1631 if (subcmd == PERF_FTRACE_NONE) 1632 subcmd = PERF_FTRACE_TRACE; 1633 1634 argc = parse_options(argc, argv, options, ftrace_usage, 1635 PARSE_OPT_STOP_AT_NON_OPTION); 1636 if (argc < 0) { 1637 ret = -EINVAL; 1638 goto out_delete_filters; 1639 } 1640 1641 /* Make system wide (-a) the default target. */ 1642 if (!argc && target__none(&ftrace.target)) 1643 ftrace.target.system_wide = true; 1644 1645 switch (subcmd) { 1646 case PERF_FTRACE_TRACE: 1647 cmd_func = __cmd_ftrace; 1648 break; 1649 case PERF_FTRACE_LATENCY: 1650 if (list_empty(&ftrace.filters)) { 1651 pr_err("Should provide a function to measure\n"); 1652 parse_options_usage(ftrace_usage, options, "T", 1); 1653 ret = -EINVAL; 1654 goto out_delete_filters; 1655 } 1656 cmd_func = __cmd_latency; 1657 break; 1658 case PERF_FTRACE_PROFILE: 1659 cmd_func = __cmd_profile; 1660 break; 1661 case PERF_FTRACE_NONE: 1662 default: 1663 pr_err("Invalid subcommand\n"); 1664 ret = -EINVAL; 1665 goto out_delete_filters; 1666 } 1667 1668 ret = target__validate(&ftrace.target); 1669 if (ret) { 1670 char errbuf[512]; 1671 1672 target__strerror(&ftrace.target, ret, errbuf, 512); 1673 pr_err("%s\n", errbuf); 1674 goto out_delete_filters; 1675 } 1676 1677 ftrace.evlist = evlist__new(); 1678 if (ftrace.evlist == NULL) { 1679 ret = -ENOMEM; 1680 goto out_delete_filters; 1681 } 1682 1683 ret = evlist__create_maps(ftrace.evlist, &ftrace.target); 1684 if (ret < 0) 1685 goto out_delete_evlist; 1686 1687 if (argc) { 1688 ret = evlist__prepare_workload(ftrace.evlist, &ftrace.target, 1689 argv, false, 1690 ftrace__workload_exec_failed_signal); 1691 if (ret < 0) 1692 goto out_delete_evlist; 1693 } 1694 1695 ret = cmd_func(&ftrace); 1696 1697 out_delete_evlist: 1698 evlist__delete(ftrace.evlist); 1699 1700 out_delete_filters: 1701 delete_filter_func(&ftrace.filters); 1702 delete_filter_func(&ftrace.notrace); 1703 delete_filter_func(&ftrace.graph_funcs); 1704 delete_filter_func(&ftrace.nograph_funcs); 1705 1706 return ret; 1707 } 1708