1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * builtin-ftrace.c 4 * 5 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org> 6 * Copyright (c) 2020 Changbin Du <changbin.du@gmail.com>, significant enhancement. 7 */ 8 9 #include "builtin.h" 10 11 #include <errno.h> 12 #include <unistd.h> 13 #include <signal.h> 14 #include <stdlib.h> 15 #include <fcntl.h> 16 #include <inttypes.h> 17 #include <math.h> 18 #include <poll.h> 19 #include <ctype.h> 20 #include <linux/capability.h> 21 #include <linux/string.h> 22 23 #include "debug.h" 24 #include <subcmd/pager.h> 25 #include <subcmd/parse-options.h> 26 #include <api/io.h> 27 #include <api/fs/tracing_path.h> 28 #include "evlist.h" 29 #include "target.h" 30 #include "cpumap.h" 31 #include "hashmap.h" 32 #include "thread_map.h" 33 #include "strfilter.h" 34 #include "util/cap.h" 35 #include "util/config.h" 36 #include "util/ftrace.h" 37 #include "util/stat.h" 38 #include "util/units.h" 39 #include "util/parse-sublevel-options.h" 40 41 #define DEFAULT_TRACER "function_graph" 42 43 static volatile sig_atomic_t workload_exec_errno; 44 static volatile sig_atomic_t done; 45 46 static void sig_handler(int sig __maybe_unused) 47 { 48 done = true; 49 } 50 51 /* 52 * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since 53 * we asked by setting its exec_error to the function below, 54 * ftrace__workload_exec_failed_signal. 55 * 56 * XXX We need to handle this more appropriately, emitting an error, etc. 57 */ 58 static void ftrace__workload_exec_failed_signal(int signo __maybe_unused, 59 siginfo_t *info __maybe_unused, 60 void *ucontext __maybe_unused) 61 { 62 workload_exec_errno = info->si_value.sival_int; 63 done = true; 64 } 65 66 static int check_ftrace_capable(void) 67 { 68 if (!(perf_cap__capable(CAP_PERFMON) || 69 perf_cap__capable(CAP_SYS_ADMIN))) { 70 pr_err("ftrace only works for %s!\n", 71 #ifdef HAVE_LIBCAP_SUPPORT 72 "users with the CAP_PERFMON or CAP_SYS_ADMIN capability" 73 #else 74 "root" 75 #endif 76 ); 77 return -1; 78 } 79 return 0; 80 } 81 82 static int __write_tracing_file(const char *name, const char *val, bool append) 83 { 84 char *file; 85 int fd, ret = -1; 86 ssize_t size = strlen(val); 87 int flags = O_WRONLY; 88 char errbuf[512]; 89 char *val_copy; 90 91 file = get_tracing_file(name); 92 if (!file) { 93 pr_debug("cannot get tracing file: %s\n", name); 94 return -1; 95 } 96 97 if (append) 98 flags |= O_APPEND; 99 else 100 flags |= O_TRUNC; 101 102 fd = open(file, flags); 103 if (fd < 0) { 104 pr_debug("cannot open tracing file: %s: %s\n", 105 name, str_error_r(errno, errbuf, sizeof(errbuf))); 106 goto out; 107 } 108 109 /* 110 * Copy the original value and append a '\n'. Without this, 111 * the kernel can hide possible errors. 112 */ 113 val_copy = strdup(val); 114 if (!val_copy) 115 goto out_close; 116 val_copy[size] = '\n'; 117 118 if (write(fd, val_copy, size + 1) == size + 1) 119 ret = 0; 120 else 121 pr_debug("write '%s' to tracing/%s failed: %s\n", 122 val, name, str_error_r(errno, errbuf, sizeof(errbuf))); 123 124 free(val_copy); 125 out_close: 126 close(fd); 127 out: 128 put_tracing_file(file); 129 return ret; 130 } 131 132 static int write_tracing_file(const char *name, const char *val) 133 { 134 return __write_tracing_file(name, val, false); 135 } 136 137 static int append_tracing_file(const char *name, const char *val) 138 { 139 return __write_tracing_file(name, val, true); 140 } 141 142 static int read_tracing_file_to_stdout(const char *name) 143 { 144 char buf[4096]; 145 char *file; 146 int fd; 147 int ret = -1; 148 149 file = get_tracing_file(name); 150 if (!file) { 151 pr_debug("cannot get tracing file: %s\n", name); 152 return -1; 153 } 154 155 fd = open(file, O_RDONLY); 156 if (fd < 0) { 157 pr_debug("cannot open tracing file: %s: %s\n", 158 name, str_error_r(errno, buf, sizeof(buf))); 159 goto out; 160 } 161 162 /* read contents to stdout */ 163 while (true) { 164 int n = read(fd, buf, sizeof(buf)); 165 if (n == 0) 166 break; 167 else if (n < 0) 168 goto out_close; 169 170 if (fwrite(buf, n, 1, stdout) != 1) 171 goto out_close; 172 } 173 ret = 0; 174 175 out_close: 176 close(fd); 177 out: 178 put_tracing_file(file); 179 return ret; 180 } 181 182 static int read_tracing_file_by_line(const char *name, 183 void (*cb)(char *str, void *arg), 184 void *cb_arg) 185 { 186 char *line = NULL; 187 size_t len = 0; 188 char *file; 189 FILE *fp; 190 191 file = get_tracing_file(name); 192 if (!file) { 193 pr_debug("cannot get tracing file: %s\n", name); 194 return -1; 195 } 196 197 fp = fopen(file, "r"); 198 if (fp == NULL) { 199 pr_debug("cannot open tracing file: %s\n", name); 200 put_tracing_file(file); 201 return -1; 202 } 203 204 while (getline(&line, &len, fp) != -1) { 205 cb(line, cb_arg); 206 } 207 208 if (line) 209 free(line); 210 211 fclose(fp); 212 put_tracing_file(file); 213 return 0; 214 } 215 216 static int write_tracing_file_int(const char *name, int value) 217 { 218 char buf[16]; 219 220 snprintf(buf, sizeof(buf), "%d", value); 221 if (write_tracing_file(name, buf) < 0) 222 return -1; 223 224 return 0; 225 } 226 227 static int write_tracing_option_file(const char *name, const char *val) 228 { 229 char *file; 230 int ret; 231 232 if (asprintf(&file, "options/%s", name) < 0) 233 return -1; 234 235 ret = __write_tracing_file(file, val, false); 236 free(file); 237 return ret; 238 } 239 240 static int reset_tracing_cpu(void); 241 static void reset_tracing_filters(void); 242 243 static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused) 244 { 245 write_tracing_option_file("function-fork", "0"); 246 write_tracing_option_file("func_stack_trace", "0"); 247 write_tracing_option_file("sleep-time", "1"); 248 write_tracing_option_file("funcgraph-irqs", "1"); 249 write_tracing_option_file("funcgraph-proc", "0"); 250 write_tracing_option_file("funcgraph-abstime", "0"); 251 write_tracing_option_file("funcgraph-tail", "0"); 252 write_tracing_option_file("latency-format", "0"); 253 write_tracing_option_file("irq-info", "0"); 254 } 255 256 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused) 257 { 258 if (write_tracing_file("tracing_on", "0") < 0) 259 return -1; 260 261 if (write_tracing_file("current_tracer", "nop") < 0) 262 return -1; 263 264 if (write_tracing_file("set_ftrace_pid", " ") < 0) 265 return -1; 266 267 if (reset_tracing_cpu() < 0) 268 return -1; 269 270 if (write_tracing_file("max_graph_depth", "0") < 0) 271 return -1; 272 273 if (write_tracing_file("tracing_thresh", "0") < 0) 274 return -1; 275 276 reset_tracing_filters(); 277 reset_tracing_options(ftrace); 278 return 0; 279 } 280 281 static int set_tracing_pid(struct perf_ftrace *ftrace) 282 { 283 int i; 284 char buf[16]; 285 286 if (target__has_cpu(&ftrace->target)) 287 return 0; 288 289 for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) { 290 scnprintf(buf, sizeof(buf), "%d", 291 perf_thread_map__pid(ftrace->evlist->core.threads, i)); 292 if (append_tracing_file("set_ftrace_pid", buf) < 0) 293 return -1; 294 } 295 return 0; 296 } 297 298 static int set_tracing_cpumask(struct perf_cpu_map *cpumap) 299 { 300 char *cpumask; 301 size_t mask_size; 302 int ret; 303 int last_cpu; 304 305 last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu; 306 mask_size = last_cpu / 4 + 2; /* one more byte for EOS */ 307 mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */ 308 309 cpumask = malloc(mask_size); 310 if (cpumask == NULL) { 311 pr_debug("failed to allocate cpu mask\n"); 312 return -1; 313 } 314 315 cpu_map__snprint_mask(cpumap, cpumask, mask_size); 316 317 ret = write_tracing_file("tracing_cpumask", cpumask); 318 319 free(cpumask); 320 return ret; 321 } 322 323 static int set_tracing_cpu(struct perf_ftrace *ftrace) 324 { 325 struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus; 326 327 if (!target__has_cpu(&ftrace->target)) 328 return 0; 329 330 return set_tracing_cpumask(cpumap); 331 } 332 333 static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace) 334 { 335 if (!ftrace->func_stack_trace) 336 return 0; 337 338 if (write_tracing_option_file("func_stack_trace", "1") < 0) 339 return -1; 340 341 return 0; 342 } 343 344 static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace) 345 { 346 if (!ftrace->func_irq_info) 347 return 0; 348 349 if (write_tracing_option_file("irq-info", "1") < 0) 350 return -1; 351 352 return 0; 353 } 354 355 static int reset_tracing_cpu(void) 356 { 357 struct perf_cpu_map *cpumap = perf_cpu_map__new_online_cpus(); 358 int ret; 359 360 ret = set_tracing_cpumask(cpumap); 361 perf_cpu_map__put(cpumap); 362 return ret; 363 } 364 365 static int __set_tracing_filter(const char *filter_file, struct list_head *funcs) 366 { 367 struct filter_entry *pos; 368 369 list_for_each_entry(pos, funcs, list) { 370 if (append_tracing_file(filter_file, pos->name) < 0) 371 return -1; 372 } 373 374 return 0; 375 } 376 377 static int set_tracing_filters(struct perf_ftrace *ftrace) 378 { 379 int ret; 380 381 ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters); 382 if (ret < 0) 383 return ret; 384 385 ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace); 386 if (ret < 0) 387 return ret; 388 389 ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs); 390 if (ret < 0) 391 return ret; 392 393 /* old kernels do not have this filter */ 394 __set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs); 395 396 return ret; 397 } 398 399 static void reset_tracing_filters(void) 400 { 401 write_tracing_file("set_ftrace_filter", " "); 402 write_tracing_file("set_ftrace_notrace", " "); 403 write_tracing_file("set_graph_function", " "); 404 write_tracing_file("set_graph_notrace", " "); 405 } 406 407 static int set_tracing_depth(struct perf_ftrace *ftrace) 408 { 409 if (ftrace->graph_depth == 0) 410 return 0; 411 412 if (ftrace->graph_depth < 0) { 413 pr_err("invalid graph depth: %d\n", ftrace->graph_depth); 414 return -1; 415 } 416 417 if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0) 418 return -1; 419 420 return 0; 421 } 422 423 static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace) 424 { 425 int ret; 426 427 if (ftrace->percpu_buffer_size == 0) 428 return 0; 429 430 ret = write_tracing_file_int("buffer_size_kb", 431 ftrace->percpu_buffer_size / 1024); 432 if (ret < 0) 433 return ret; 434 435 return 0; 436 } 437 438 static int set_tracing_trace_inherit(struct perf_ftrace *ftrace) 439 { 440 if (!ftrace->inherit) 441 return 0; 442 443 if (write_tracing_option_file("function-fork", "1") < 0) 444 return -1; 445 446 return 0; 447 } 448 449 static int set_tracing_sleep_time(struct perf_ftrace *ftrace) 450 { 451 if (!ftrace->graph_nosleep_time) 452 return 0; 453 454 if (write_tracing_option_file("sleep-time", "0") < 0) 455 return -1; 456 457 return 0; 458 } 459 460 static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace) 461 { 462 if (!ftrace->graph_noirqs) 463 return 0; 464 465 if (write_tracing_option_file("funcgraph-irqs", "0") < 0) 466 return -1; 467 468 return 0; 469 } 470 471 static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace) 472 { 473 if (!ftrace->graph_verbose) 474 return 0; 475 476 if (write_tracing_option_file("funcgraph-proc", "1") < 0) 477 return -1; 478 479 if (write_tracing_option_file("funcgraph-abstime", "1") < 0) 480 return -1; 481 482 if (write_tracing_option_file("latency-format", "1") < 0) 483 return -1; 484 485 return 0; 486 } 487 488 static int set_tracing_funcgraph_tail(struct perf_ftrace *ftrace) 489 { 490 if (!ftrace->graph_tail) 491 return 0; 492 493 if (write_tracing_option_file("funcgraph-tail", "1") < 0) 494 return -1; 495 496 return 0; 497 } 498 499 static int set_tracing_thresh(struct perf_ftrace *ftrace) 500 { 501 int ret; 502 503 if (ftrace->graph_thresh == 0) 504 return 0; 505 506 ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh); 507 if (ret < 0) 508 return ret; 509 510 return 0; 511 } 512 513 static int set_tracing_options(struct perf_ftrace *ftrace) 514 { 515 if (set_tracing_pid(ftrace) < 0) { 516 pr_err("failed to set ftrace pid\n"); 517 return -1; 518 } 519 520 if (set_tracing_cpu(ftrace) < 0) { 521 pr_err("failed to set tracing cpumask\n"); 522 return -1; 523 } 524 525 if (set_tracing_func_stack_trace(ftrace) < 0) { 526 pr_err("failed to set tracing option func_stack_trace\n"); 527 return -1; 528 } 529 530 if (set_tracing_func_irqinfo(ftrace) < 0) { 531 pr_err("failed to set tracing option irq-info\n"); 532 return -1; 533 } 534 535 if (set_tracing_filters(ftrace) < 0) { 536 pr_err("failed to set tracing filters\n"); 537 return -1; 538 } 539 540 if (set_tracing_depth(ftrace) < 0) { 541 pr_err("failed to set graph depth\n"); 542 return -1; 543 } 544 545 if (set_tracing_percpu_buffer_size(ftrace) < 0) { 546 pr_err("failed to set tracing per-cpu buffer size\n"); 547 return -1; 548 } 549 550 if (set_tracing_trace_inherit(ftrace) < 0) { 551 pr_err("failed to set tracing option function-fork\n"); 552 return -1; 553 } 554 555 if (set_tracing_sleep_time(ftrace) < 0) { 556 pr_err("failed to set tracing option sleep-time\n"); 557 return -1; 558 } 559 560 if (set_tracing_funcgraph_irqs(ftrace) < 0) { 561 pr_err("failed to set tracing option funcgraph-irqs\n"); 562 return -1; 563 } 564 565 if (set_tracing_funcgraph_verbose(ftrace) < 0) { 566 pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n"); 567 return -1; 568 } 569 570 if (set_tracing_thresh(ftrace) < 0) { 571 pr_err("failed to set tracing thresh\n"); 572 return -1; 573 } 574 575 if (set_tracing_funcgraph_tail(ftrace) < 0) { 576 pr_err("failed to set tracing option funcgraph-tail\n"); 577 return -1; 578 } 579 580 return 0; 581 } 582 583 static void select_tracer(struct perf_ftrace *ftrace) 584 { 585 bool graph = !list_empty(&ftrace->graph_funcs) || 586 !list_empty(&ftrace->nograph_funcs); 587 bool func = !list_empty(&ftrace->filters) || 588 !list_empty(&ftrace->notrace); 589 590 /* The function_graph has priority over function tracer. */ 591 if (graph) 592 ftrace->tracer = "function_graph"; 593 else if (func) 594 ftrace->tracer = "function"; 595 /* Otherwise, the default tracer is used. */ 596 597 pr_debug("%s tracer is used\n", ftrace->tracer); 598 } 599 600 static int __cmd_ftrace(struct perf_ftrace *ftrace) 601 { 602 char *trace_file; 603 int trace_fd; 604 char buf[4096]; 605 struct pollfd pollfd = { 606 .events = POLLIN, 607 }; 608 609 select_tracer(ftrace); 610 611 if (reset_tracing_files(ftrace) < 0) { 612 pr_err("failed to reset ftrace\n"); 613 goto out; 614 } 615 616 /* reset ftrace buffer */ 617 if (write_tracing_file("trace", "0") < 0) 618 goto out; 619 620 if (set_tracing_options(ftrace) < 0) 621 goto out_reset; 622 623 if (write_tracing_file("current_tracer", ftrace->tracer) < 0) { 624 pr_err("failed to set current_tracer to %s\n", ftrace->tracer); 625 goto out_reset; 626 } 627 628 setup_pager(); 629 630 trace_file = get_tracing_file("trace_pipe"); 631 if (!trace_file) { 632 pr_err("failed to open trace_pipe\n"); 633 goto out_reset; 634 } 635 636 trace_fd = open(trace_file, O_RDONLY); 637 638 put_tracing_file(trace_file); 639 640 if (trace_fd < 0) { 641 pr_err("failed to open trace_pipe\n"); 642 goto out_reset; 643 } 644 645 fcntl(trace_fd, F_SETFL, O_NONBLOCK); 646 pollfd.fd = trace_fd; 647 648 /* display column headers */ 649 read_tracing_file_to_stdout("trace"); 650 651 if (!ftrace->target.initial_delay) { 652 if (write_tracing_file("tracing_on", "1") < 0) { 653 pr_err("can't enable tracing\n"); 654 goto out_close_fd; 655 } 656 } 657 658 evlist__start_workload(ftrace->evlist); 659 660 if (ftrace->target.initial_delay > 0) { 661 usleep(ftrace->target.initial_delay * 1000); 662 if (write_tracing_file("tracing_on", "1") < 0) { 663 pr_err("can't enable tracing\n"); 664 goto out_close_fd; 665 } 666 } 667 668 while (!done) { 669 if (poll(&pollfd, 1, -1) < 0) 670 break; 671 672 if (pollfd.revents & POLLIN) { 673 int n = read(trace_fd, buf, sizeof(buf)); 674 if (n < 0) 675 break; 676 if (fwrite(buf, n, 1, stdout) != 1) 677 break; 678 /* flush output since stdout is in full buffering mode due to pager */ 679 fflush(stdout); 680 } 681 } 682 683 write_tracing_file("tracing_on", "0"); 684 685 if (workload_exec_errno) { 686 const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); 687 /* flush stdout first so below error msg appears at the end. */ 688 fflush(stdout); 689 pr_err("workload failed: %s\n", emsg); 690 goto out_close_fd; 691 } 692 693 /* read remaining buffer contents */ 694 while (true) { 695 int n = read(trace_fd, buf, sizeof(buf)); 696 if (n <= 0) 697 break; 698 if (fwrite(buf, n, 1, stdout) != 1) 699 break; 700 } 701 702 out_close_fd: 703 close(trace_fd); 704 out_reset: 705 reset_tracing_files(ftrace); 706 out: 707 return (done && !workload_exec_errno) ? 0 : -1; 708 } 709 710 static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf, 711 bool use_nsec) 712 { 713 char *p, *q; 714 char *unit; 715 double num; 716 int i; 717 718 /* ensure NUL termination */ 719 buf[len] = '\0'; 720 721 /* handle data line by line */ 722 for (p = buf; (q = strchr(p, '\n')) != NULL; p = q + 1) { 723 *q = '\0'; 724 /* move it to the line buffer */ 725 strcat(linebuf, p); 726 727 /* 728 * parse trace output to get function duration like in 729 * 730 * # tracer: function_graph 731 * # 732 * # CPU DURATION FUNCTION CALLS 733 * # | | | | | | | 734 * 1) + 10.291 us | do_filp_open(); 735 * 1) 4.889 us | do_filp_open(); 736 * 1) 6.086 us | do_filp_open(); 737 * 738 */ 739 if (linebuf[0] == '#') 740 goto next; 741 742 /* ignore CPU */ 743 p = strchr(linebuf, ')'); 744 if (p == NULL) 745 p = linebuf; 746 747 while (*p && !isdigit(*p) && (*p != '|')) 748 p++; 749 750 /* no duration */ 751 if (*p == '\0' || *p == '|') 752 goto next; 753 754 num = strtod(p, &unit); 755 if (!unit || strncmp(unit, " us", 3)) 756 goto next; 757 758 if (use_nsec) 759 num *= 1000; 760 761 i = log2(num); 762 if (i < 0) 763 i = 0; 764 if (i >= NUM_BUCKET) 765 i = NUM_BUCKET - 1; 766 767 buckets[i]++; 768 769 next: 770 /* empty the line buffer for the next output */ 771 linebuf[0] = '\0'; 772 } 773 774 /* preserve any remaining output (before newline) */ 775 strcat(linebuf, p); 776 } 777 778 static void display_histogram(int buckets[], bool use_nsec) 779 { 780 int i; 781 int total = 0; 782 int bar_total = 46; /* to fit in 80 column */ 783 char bar[] = "###############################################"; 784 int bar_len; 785 786 for (i = 0; i < NUM_BUCKET; i++) 787 total += buckets[i]; 788 789 if (total == 0) { 790 printf("No data found\n"); 791 return; 792 } 793 794 printf("# %14s | %10s | %-*s |\n", 795 " DURATION ", "COUNT", bar_total, "GRAPH"); 796 797 bar_len = buckets[0] * bar_total / total; 798 printf(" %4d - %-4d %s | %10d | %.*s%*s |\n", 799 0, 1, "us", buckets[0], bar_len, bar, bar_total - bar_len, ""); 800 801 for (i = 1; i < NUM_BUCKET - 1; i++) { 802 int start = (1 << (i - 1)); 803 int stop = 1 << i; 804 const char *unit = use_nsec ? "ns" : "us"; 805 806 if (start >= 1024) { 807 start >>= 10; 808 stop >>= 10; 809 unit = use_nsec ? "us" : "ms"; 810 } 811 bar_len = buckets[i] * bar_total / total; 812 printf(" %4d - %-4d %s | %10d | %.*s%*s |\n", 813 start, stop, unit, buckets[i], bar_len, bar, 814 bar_total - bar_len, ""); 815 } 816 817 bar_len = buckets[NUM_BUCKET - 1] * bar_total / total; 818 printf(" %4d - %-4s %s | %10d | %.*s%*s |\n", 819 1, "...", use_nsec ? "ms" : " s", buckets[NUM_BUCKET - 1], 820 bar_len, bar, bar_total - bar_len, ""); 821 822 } 823 824 static int prepare_func_latency(struct perf_ftrace *ftrace) 825 { 826 char *trace_file; 827 int fd; 828 829 if (ftrace->target.use_bpf) 830 return perf_ftrace__latency_prepare_bpf(ftrace); 831 832 if (reset_tracing_files(ftrace) < 0) { 833 pr_err("failed to reset ftrace\n"); 834 return -1; 835 } 836 837 /* reset ftrace buffer */ 838 if (write_tracing_file("trace", "0") < 0) 839 return -1; 840 841 if (set_tracing_options(ftrace) < 0) 842 return -1; 843 844 /* force to use the function_graph tracer to track duration */ 845 if (write_tracing_file("current_tracer", "function_graph") < 0) { 846 pr_err("failed to set current_tracer to function_graph\n"); 847 return -1; 848 } 849 850 trace_file = get_tracing_file("trace_pipe"); 851 if (!trace_file) { 852 pr_err("failed to open trace_pipe\n"); 853 return -1; 854 } 855 856 fd = open(trace_file, O_RDONLY); 857 if (fd < 0) 858 pr_err("failed to open trace_pipe\n"); 859 860 put_tracing_file(trace_file); 861 return fd; 862 } 863 864 static int start_func_latency(struct perf_ftrace *ftrace) 865 { 866 if (ftrace->target.use_bpf) 867 return perf_ftrace__latency_start_bpf(ftrace); 868 869 if (write_tracing_file("tracing_on", "1") < 0) { 870 pr_err("can't enable tracing\n"); 871 return -1; 872 } 873 874 return 0; 875 } 876 877 static int stop_func_latency(struct perf_ftrace *ftrace) 878 { 879 if (ftrace->target.use_bpf) 880 return perf_ftrace__latency_stop_bpf(ftrace); 881 882 write_tracing_file("tracing_on", "0"); 883 return 0; 884 } 885 886 static int read_func_latency(struct perf_ftrace *ftrace, int buckets[]) 887 { 888 if (ftrace->target.use_bpf) 889 return perf_ftrace__latency_read_bpf(ftrace, buckets); 890 891 return 0; 892 } 893 894 static int cleanup_func_latency(struct perf_ftrace *ftrace) 895 { 896 if (ftrace->target.use_bpf) 897 return perf_ftrace__latency_cleanup_bpf(ftrace); 898 899 reset_tracing_files(ftrace); 900 return 0; 901 } 902 903 static int __cmd_latency(struct perf_ftrace *ftrace) 904 { 905 int trace_fd; 906 char buf[4096]; 907 char line[256]; 908 struct pollfd pollfd = { 909 .events = POLLIN, 910 }; 911 int buckets[NUM_BUCKET] = { }; 912 913 trace_fd = prepare_func_latency(ftrace); 914 if (trace_fd < 0) 915 goto out; 916 917 fcntl(trace_fd, F_SETFL, O_NONBLOCK); 918 pollfd.fd = trace_fd; 919 920 if (start_func_latency(ftrace) < 0) 921 goto out; 922 923 evlist__start_workload(ftrace->evlist); 924 925 line[0] = '\0'; 926 while (!done) { 927 if (poll(&pollfd, 1, -1) < 0) 928 break; 929 930 if (pollfd.revents & POLLIN) { 931 int n = read(trace_fd, buf, sizeof(buf) - 1); 932 if (n < 0) 933 break; 934 935 make_histogram(buckets, buf, n, line, ftrace->use_nsec); 936 } 937 } 938 939 stop_func_latency(ftrace); 940 941 if (workload_exec_errno) { 942 const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); 943 pr_err("workload failed: %s\n", emsg); 944 goto out; 945 } 946 947 /* read remaining buffer contents */ 948 while (!ftrace->target.use_bpf) { 949 int n = read(trace_fd, buf, sizeof(buf) - 1); 950 if (n <= 0) 951 break; 952 make_histogram(buckets, buf, n, line, ftrace->use_nsec); 953 } 954 955 read_func_latency(ftrace, buckets); 956 957 display_histogram(buckets, ftrace->use_nsec); 958 959 out: 960 close(trace_fd); 961 cleanup_func_latency(ftrace); 962 963 return (done && !workload_exec_errno) ? 0 : -1; 964 } 965 966 static size_t profile_hash(long func, void *ctx __maybe_unused) 967 { 968 return str_hash((char *)func); 969 } 970 971 static bool profile_equal(long func1, long func2, void *ctx __maybe_unused) 972 { 973 return !strcmp((char *)func1, (char *)func2); 974 } 975 976 static int prepare_func_profile(struct perf_ftrace *ftrace) 977 { 978 ftrace->tracer = "function_graph"; 979 ftrace->graph_tail = 1; 980 981 ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL); 982 if (ftrace->profile_hash == NULL) 983 return -ENOMEM; 984 985 return 0; 986 } 987 988 /* This is saved in a hashmap keyed by the function name */ 989 struct ftrace_profile_data { 990 struct stats st; 991 }; 992 993 static int add_func_duration(struct perf_ftrace *ftrace, char *func, double time_ns) 994 { 995 struct ftrace_profile_data *prof = NULL; 996 997 if (!hashmap__find(ftrace->profile_hash, func, &prof)) { 998 char *key = strdup(func); 999 1000 if (key == NULL) 1001 return -ENOMEM; 1002 1003 prof = zalloc(sizeof(*prof)); 1004 if (prof == NULL) { 1005 free(key); 1006 return -ENOMEM; 1007 } 1008 1009 init_stats(&prof->st); 1010 hashmap__add(ftrace->profile_hash, key, prof); 1011 } 1012 1013 update_stats(&prof->st, time_ns); 1014 return 0; 1015 } 1016 1017 /* 1018 * The ftrace function_graph text output normally looks like below: 1019 * 1020 * CPU DURATION FUNCTION 1021 * 1022 * 0) | syscall_trace_enter.isra.0() { 1023 * 0) | __audit_syscall_entry() { 1024 * 0) | auditd_test_task() { 1025 * 0) 0.271 us | __rcu_read_lock(); 1026 * 0) 0.275 us | __rcu_read_unlock(); 1027 * 0) 1.254 us | } /\* auditd_test_task *\/ 1028 * 0) 0.279 us | ktime_get_coarse_real_ts64(); 1029 * 0) 2.227 us | } /\* __audit_syscall_entry *\/ 1030 * 0) 2.713 us | } /\* syscall_trace_enter.isra.0 *\/ 1031 * 1032 * Parse the line and get the duration and function name. 1033 */ 1034 static int parse_func_duration(struct perf_ftrace *ftrace, char *line, size_t len) 1035 { 1036 char *p; 1037 char *func; 1038 double duration; 1039 1040 /* skip CPU */ 1041 p = strchr(line, ')'); 1042 if (p == NULL) 1043 return 0; 1044 1045 /* get duration */ 1046 p = skip_spaces(p + 1); 1047 1048 /* no duration? */ 1049 if (p == NULL || *p == '|') 1050 return 0; 1051 1052 /* skip markers like '*' or '!' for longer than ms */ 1053 if (!isdigit(*p)) 1054 p++; 1055 1056 duration = strtod(p, &p); 1057 1058 if (strncmp(p, " us", 3)) { 1059 pr_debug("non-usec time found.. ignoring\n"); 1060 return 0; 1061 } 1062 1063 /* 1064 * profile stat keeps the max and min values as integer, 1065 * convert to nsec time so that we can have accurate max. 1066 */ 1067 duration *= 1000; 1068 1069 /* skip to the pipe */ 1070 while (p < line + len && *p != '|') 1071 p++; 1072 1073 if (*p++ != '|') 1074 return -EINVAL; 1075 1076 /* get function name */ 1077 func = skip_spaces(p); 1078 1079 /* skip the closing bracket and the start of comment */ 1080 if (*func == '}') 1081 func += 5; 1082 1083 /* remove semi-colon or end of comment at the end */ 1084 p = line + len - 1; 1085 while (!isalnum(*p) && *p != ']') { 1086 *p = '\0'; 1087 --p; 1088 } 1089 1090 return add_func_duration(ftrace, func, duration); 1091 } 1092 1093 enum perf_ftrace_profile_sort_key { 1094 PFP_SORT_TOTAL = 0, 1095 PFP_SORT_AVG, 1096 PFP_SORT_MAX, 1097 PFP_SORT_COUNT, 1098 PFP_SORT_NAME, 1099 }; 1100 1101 static enum perf_ftrace_profile_sort_key profile_sort = PFP_SORT_TOTAL; 1102 1103 static int cmp_profile_data(const void *a, const void *b) 1104 { 1105 const struct hashmap_entry *e1 = *(const struct hashmap_entry **)a; 1106 const struct hashmap_entry *e2 = *(const struct hashmap_entry **)b; 1107 struct ftrace_profile_data *p1 = e1->pvalue; 1108 struct ftrace_profile_data *p2 = e2->pvalue; 1109 double v1, v2; 1110 1111 switch (profile_sort) { 1112 case PFP_SORT_NAME: 1113 return strcmp(e1->pkey, e2->pkey); 1114 case PFP_SORT_AVG: 1115 v1 = p1->st.mean; 1116 v2 = p2->st.mean; 1117 break; 1118 case PFP_SORT_MAX: 1119 v1 = p1->st.max; 1120 v2 = p2->st.max; 1121 break; 1122 case PFP_SORT_COUNT: 1123 v1 = p1->st.n; 1124 v2 = p2->st.n; 1125 break; 1126 case PFP_SORT_TOTAL: 1127 default: 1128 v1 = p1->st.n * p1->st.mean; 1129 v2 = p2->st.n * p2->st.mean; 1130 break; 1131 } 1132 1133 if (v1 > v2) 1134 return -1; 1135 else 1136 return 1; 1137 } 1138 1139 static void print_profile_result(struct perf_ftrace *ftrace) 1140 { 1141 struct hashmap_entry *entry, **profile; 1142 size_t i, nr, bkt; 1143 1144 nr = hashmap__size(ftrace->profile_hash); 1145 if (nr == 0) 1146 return; 1147 1148 profile = calloc(nr, sizeof(*profile)); 1149 if (profile == NULL) { 1150 pr_err("failed to allocate memory for the result\n"); 1151 return; 1152 } 1153 1154 i = 0; 1155 hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) 1156 profile[i++] = entry; 1157 1158 assert(i == nr); 1159 1160 //cmp_profile_data(profile[0], profile[1]); 1161 qsort(profile, nr, sizeof(*profile), cmp_profile_data); 1162 1163 printf("# %10s %10s %10s %10s %s\n", 1164 "Total (us)", "Avg (us)", "Max (us)", "Count", "Function"); 1165 1166 for (i = 0; i < nr; i++) { 1167 const char *name = profile[i]->pkey; 1168 struct ftrace_profile_data *p = profile[i]->pvalue; 1169 1170 printf("%12.3f %10.3f %6"PRIu64".%03"PRIu64" %10.0f %s\n", 1171 p->st.n * p->st.mean / 1000, p->st.mean / 1000, 1172 p->st.max / 1000, p->st.max % 1000, p->st.n, name); 1173 } 1174 1175 free(profile); 1176 1177 hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) { 1178 free((char *)entry->pkey); 1179 free(entry->pvalue); 1180 } 1181 1182 hashmap__free(ftrace->profile_hash); 1183 ftrace->profile_hash = NULL; 1184 } 1185 1186 static int __cmd_profile(struct perf_ftrace *ftrace) 1187 { 1188 char *trace_file; 1189 int trace_fd; 1190 char buf[4096]; 1191 struct io io; 1192 char *line = NULL; 1193 size_t line_len = 0; 1194 1195 if (prepare_func_profile(ftrace) < 0) { 1196 pr_err("failed to prepare func profiler\n"); 1197 goto out; 1198 } 1199 1200 if (reset_tracing_files(ftrace) < 0) { 1201 pr_err("failed to reset ftrace\n"); 1202 goto out; 1203 } 1204 1205 /* reset ftrace buffer */ 1206 if (write_tracing_file("trace", "0") < 0) 1207 goto out; 1208 1209 if (set_tracing_options(ftrace) < 0) 1210 return -1; 1211 1212 if (write_tracing_file("current_tracer", ftrace->tracer) < 0) { 1213 pr_err("failed to set current_tracer to %s\n", ftrace->tracer); 1214 goto out_reset; 1215 } 1216 1217 setup_pager(); 1218 1219 trace_file = get_tracing_file("trace_pipe"); 1220 if (!trace_file) { 1221 pr_err("failed to open trace_pipe\n"); 1222 goto out_reset; 1223 } 1224 1225 trace_fd = open(trace_file, O_RDONLY); 1226 1227 put_tracing_file(trace_file); 1228 1229 if (trace_fd < 0) { 1230 pr_err("failed to open trace_pipe\n"); 1231 goto out_reset; 1232 } 1233 1234 fcntl(trace_fd, F_SETFL, O_NONBLOCK); 1235 1236 if (write_tracing_file("tracing_on", "1") < 0) { 1237 pr_err("can't enable tracing\n"); 1238 goto out_close_fd; 1239 } 1240 1241 evlist__start_workload(ftrace->evlist); 1242 1243 io__init(&io, trace_fd, buf, sizeof(buf)); 1244 io.timeout_ms = -1; 1245 1246 while (!done && !io.eof) { 1247 if (io__getline(&io, &line, &line_len) < 0) 1248 break; 1249 1250 if (parse_func_duration(ftrace, line, line_len) < 0) 1251 break; 1252 } 1253 1254 write_tracing_file("tracing_on", "0"); 1255 1256 if (workload_exec_errno) { 1257 const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); 1258 /* flush stdout first so below error msg appears at the end. */ 1259 fflush(stdout); 1260 pr_err("workload failed: %s\n", emsg); 1261 goto out_free_line; 1262 } 1263 1264 /* read remaining buffer contents */ 1265 io.timeout_ms = 0; 1266 while (!io.eof) { 1267 if (io__getline(&io, &line, &line_len) < 0) 1268 break; 1269 1270 if (parse_func_duration(ftrace, line, line_len) < 0) 1271 break; 1272 } 1273 1274 print_profile_result(ftrace); 1275 1276 out_free_line: 1277 free(line); 1278 out_close_fd: 1279 close(trace_fd); 1280 out_reset: 1281 reset_tracing_files(ftrace); 1282 out: 1283 return (done && !workload_exec_errno) ? 0 : -1; 1284 } 1285 1286 static int perf_ftrace_config(const char *var, const char *value, void *cb) 1287 { 1288 struct perf_ftrace *ftrace = cb; 1289 1290 if (!strstarts(var, "ftrace.")) 1291 return 0; 1292 1293 if (strcmp(var, "ftrace.tracer")) 1294 return -1; 1295 1296 if (!strcmp(value, "function_graph") || 1297 !strcmp(value, "function")) { 1298 ftrace->tracer = value; 1299 return 0; 1300 } 1301 1302 pr_err("Please select \"function_graph\" (default) or \"function\"\n"); 1303 return -1; 1304 } 1305 1306 static void list_function_cb(char *str, void *arg) 1307 { 1308 struct strfilter *filter = (struct strfilter *)arg; 1309 1310 if (strfilter__compare(filter, str)) 1311 printf("%s", str); 1312 } 1313 1314 static int opt_list_avail_functions(const struct option *opt __maybe_unused, 1315 const char *str, int unset) 1316 { 1317 struct strfilter *filter; 1318 const char *err = NULL; 1319 int ret; 1320 1321 if (unset || !str) 1322 return -1; 1323 1324 filter = strfilter__new(str, &err); 1325 if (!filter) 1326 return err ? -EINVAL : -ENOMEM; 1327 1328 ret = strfilter__or(filter, str, &err); 1329 if (ret == -EINVAL) { 1330 pr_err("Filter parse error at %td.\n", err - str + 1); 1331 pr_err("Source: \"%s\"\n", str); 1332 pr_err(" %*c\n", (int)(err - str + 1), '^'); 1333 strfilter__delete(filter); 1334 return ret; 1335 } 1336 1337 ret = read_tracing_file_by_line("available_filter_functions", 1338 list_function_cb, filter); 1339 strfilter__delete(filter); 1340 if (ret < 0) 1341 return ret; 1342 1343 exit(0); 1344 } 1345 1346 static int parse_filter_func(const struct option *opt, const char *str, 1347 int unset __maybe_unused) 1348 { 1349 struct list_head *head = opt->value; 1350 struct filter_entry *entry; 1351 1352 entry = malloc(sizeof(*entry) + strlen(str) + 1); 1353 if (entry == NULL) 1354 return -ENOMEM; 1355 1356 strcpy(entry->name, str); 1357 list_add_tail(&entry->list, head); 1358 1359 return 0; 1360 } 1361 1362 static void delete_filter_func(struct list_head *head) 1363 { 1364 struct filter_entry *pos, *tmp; 1365 1366 list_for_each_entry_safe(pos, tmp, head, list) { 1367 list_del_init(&pos->list); 1368 free(pos); 1369 } 1370 } 1371 1372 static int parse_buffer_size(const struct option *opt, 1373 const char *str, int unset) 1374 { 1375 unsigned long *s = (unsigned long *)opt->value; 1376 static struct parse_tag tags_size[] = { 1377 { .tag = 'B', .mult = 1 }, 1378 { .tag = 'K', .mult = 1 << 10 }, 1379 { .tag = 'M', .mult = 1 << 20 }, 1380 { .tag = 'G', .mult = 1 << 30 }, 1381 { .tag = 0 }, 1382 }; 1383 unsigned long val; 1384 1385 if (unset) { 1386 *s = 0; 1387 return 0; 1388 } 1389 1390 val = parse_tag_value(str, tags_size); 1391 if (val != (unsigned long) -1) { 1392 if (val < 1024) { 1393 pr_err("buffer size too small, must larger than 1KB."); 1394 return -1; 1395 } 1396 *s = val; 1397 return 0; 1398 } 1399 1400 return -1; 1401 } 1402 1403 static int parse_func_tracer_opts(const struct option *opt, 1404 const char *str, int unset) 1405 { 1406 int ret; 1407 struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value; 1408 struct sublevel_option func_tracer_opts[] = { 1409 { .name = "call-graph", .value_ptr = &ftrace->func_stack_trace }, 1410 { .name = "irq-info", .value_ptr = &ftrace->func_irq_info }, 1411 { .name = NULL, } 1412 }; 1413 1414 if (unset) 1415 return 0; 1416 1417 ret = perf_parse_sublevel_options(str, func_tracer_opts); 1418 if (ret) 1419 return ret; 1420 1421 return 0; 1422 } 1423 1424 static int parse_graph_tracer_opts(const struct option *opt, 1425 const char *str, int unset) 1426 { 1427 int ret; 1428 struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value; 1429 struct sublevel_option graph_tracer_opts[] = { 1430 { .name = "nosleep-time", .value_ptr = &ftrace->graph_nosleep_time }, 1431 { .name = "noirqs", .value_ptr = &ftrace->graph_noirqs }, 1432 { .name = "verbose", .value_ptr = &ftrace->graph_verbose }, 1433 { .name = "thresh", .value_ptr = &ftrace->graph_thresh }, 1434 { .name = "depth", .value_ptr = &ftrace->graph_depth }, 1435 { .name = "tail", .value_ptr = &ftrace->graph_tail }, 1436 { .name = NULL, } 1437 }; 1438 1439 if (unset) 1440 return 0; 1441 1442 ret = perf_parse_sublevel_options(str, graph_tracer_opts); 1443 if (ret) 1444 return ret; 1445 1446 return 0; 1447 } 1448 1449 static int parse_sort_key(const struct option *opt, const char *str, int unset) 1450 { 1451 enum perf_ftrace_profile_sort_key *key = (void *)opt->value; 1452 1453 if (unset) 1454 return 0; 1455 1456 if (!strcmp(str, "total")) 1457 *key = PFP_SORT_TOTAL; 1458 else if (!strcmp(str, "avg")) 1459 *key = PFP_SORT_AVG; 1460 else if (!strcmp(str, "max")) 1461 *key = PFP_SORT_MAX; 1462 else if (!strcmp(str, "count")) 1463 *key = PFP_SORT_COUNT; 1464 else if (!strcmp(str, "name")) 1465 *key = PFP_SORT_NAME; 1466 else { 1467 pr_err("Unknown sort key: %s\n", str); 1468 return -1; 1469 } 1470 return 0; 1471 } 1472 1473 enum perf_ftrace_subcommand { 1474 PERF_FTRACE_NONE, 1475 PERF_FTRACE_TRACE, 1476 PERF_FTRACE_LATENCY, 1477 PERF_FTRACE_PROFILE, 1478 }; 1479 1480 int cmd_ftrace(int argc, const char **argv) 1481 { 1482 int ret; 1483 int (*cmd_func)(struct perf_ftrace *) = NULL; 1484 struct perf_ftrace ftrace = { 1485 .tracer = DEFAULT_TRACER, 1486 .target = { .uid = UINT_MAX, }, 1487 }; 1488 const struct option common_options[] = { 1489 OPT_STRING('p', "pid", &ftrace.target.pid, "pid", 1490 "Trace on existing process id"), 1491 /* TODO: Add short option -t after -t/--tracer can be removed. */ 1492 OPT_STRING(0, "tid", &ftrace.target.tid, "tid", 1493 "Trace on existing thread id (exclusive to --pid)"), 1494 OPT_INCR('v', "verbose", &verbose, 1495 "Be more verbose"), 1496 OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide, 1497 "System-wide collection from all CPUs"), 1498 OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu", 1499 "List of cpus to monitor"), 1500 OPT_END() 1501 }; 1502 const struct option ftrace_options[] = { 1503 OPT_STRING('t', "tracer", &ftrace.tracer, "tracer", 1504 "Tracer to use: function_graph(default) or function"), 1505 OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]", 1506 "Show available functions to filter", 1507 opt_list_avail_functions, "*"), 1508 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", 1509 "Trace given functions using function tracer", 1510 parse_filter_func), 1511 OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func", 1512 "Do not trace given functions", parse_filter_func), 1513 OPT_CALLBACK(0, "func-opts", &ftrace, "options", 1514 "Function tracer options, available options: call-graph,irq-info", 1515 parse_func_tracer_opts), 1516 OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func", 1517 "Trace given functions using function_graph tracer", 1518 parse_filter_func), 1519 OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func", 1520 "Set nograph filter on given functions", parse_filter_func), 1521 OPT_CALLBACK(0, "graph-opts", &ftrace, "options", 1522 "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>", 1523 parse_graph_tracer_opts), 1524 OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size", 1525 "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size), 1526 OPT_BOOLEAN(0, "inherit", &ftrace.inherit, 1527 "Trace children processes"), 1528 OPT_INTEGER('D', "delay", &ftrace.target.initial_delay, 1529 "Number of milliseconds to wait before starting tracing after program start"), 1530 OPT_PARENT(common_options), 1531 }; 1532 const struct option latency_options[] = { 1533 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", 1534 "Show latency of given function", parse_filter_func), 1535 #ifdef HAVE_BPF_SKEL 1536 OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf, 1537 "Use BPF to measure function latency"), 1538 #endif 1539 OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec, 1540 "Use nano-second histogram"), 1541 OPT_PARENT(common_options), 1542 }; 1543 const struct option profile_options[] = { 1544 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", 1545 "Trace given functions using function tracer", 1546 parse_filter_func), 1547 OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func", 1548 "Do not trace given functions", parse_filter_func), 1549 OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func", 1550 "Trace given functions using function_graph tracer", 1551 parse_filter_func), 1552 OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func", 1553 "Set nograph filter on given functions", parse_filter_func), 1554 OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size", 1555 "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size), 1556 OPT_CALLBACK('s', "sort", &profile_sort, "key", 1557 "Sort result by key: total (default), avg, max, count, name.", 1558 parse_sort_key), 1559 OPT_PARENT(common_options), 1560 }; 1561 const struct option *options = ftrace_options; 1562 1563 const char * const ftrace_usage[] = { 1564 "perf ftrace [<options>] [<command>]", 1565 "perf ftrace [<options>] -- [<command>] [<options>]", 1566 "perf ftrace {trace|latency|profile} [<options>] [<command>]", 1567 "perf ftrace {trace|latency|profile} [<options>] -- [<command>] [<options>]", 1568 NULL 1569 }; 1570 enum perf_ftrace_subcommand subcmd = PERF_FTRACE_NONE; 1571 1572 INIT_LIST_HEAD(&ftrace.filters); 1573 INIT_LIST_HEAD(&ftrace.notrace); 1574 INIT_LIST_HEAD(&ftrace.graph_funcs); 1575 INIT_LIST_HEAD(&ftrace.nograph_funcs); 1576 1577 signal(SIGINT, sig_handler); 1578 signal(SIGUSR1, sig_handler); 1579 signal(SIGCHLD, sig_handler); 1580 signal(SIGPIPE, sig_handler); 1581 1582 ret = check_ftrace_capable(); 1583 if (ret < 0) 1584 return -1; 1585 1586 ret = perf_config(perf_ftrace_config, &ftrace); 1587 if (ret < 0) 1588 return -1; 1589 1590 if (argc > 1) { 1591 if (!strcmp(argv[1], "trace")) { 1592 subcmd = PERF_FTRACE_TRACE; 1593 } else if (!strcmp(argv[1], "latency")) { 1594 subcmd = PERF_FTRACE_LATENCY; 1595 options = latency_options; 1596 } else if (!strcmp(argv[1], "profile")) { 1597 subcmd = PERF_FTRACE_PROFILE; 1598 options = profile_options; 1599 } 1600 1601 if (subcmd != PERF_FTRACE_NONE) { 1602 argc--; 1603 argv++; 1604 } 1605 } 1606 /* for backward compatibility */ 1607 if (subcmd == PERF_FTRACE_NONE) 1608 subcmd = PERF_FTRACE_TRACE; 1609 1610 argc = parse_options(argc, argv, options, ftrace_usage, 1611 PARSE_OPT_STOP_AT_NON_OPTION); 1612 if (argc < 0) { 1613 ret = -EINVAL; 1614 goto out_delete_filters; 1615 } 1616 1617 /* Make system wide (-a) the default target. */ 1618 if (!argc && target__none(&ftrace.target)) 1619 ftrace.target.system_wide = true; 1620 1621 switch (subcmd) { 1622 case PERF_FTRACE_TRACE: 1623 cmd_func = __cmd_ftrace; 1624 break; 1625 case PERF_FTRACE_LATENCY: 1626 if (list_empty(&ftrace.filters)) { 1627 pr_err("Should provide a function to measure\n"); 1628 parse_options_usage(ftrace_usage, options, "T", 1); 1629 ret = -EINVAL; 1630 goto out_delete_filters; 1631 } 1632 cmd_func = __cmd_latency; 1633 break; 1634 case PERF_FTRACE_PROFILE: 1635 cmd_func = __cmd_profile; 1636 break; 1637 case PERF_FTRACE_NONE: 1638 default: 1639 pr_err("Invalid subcommand\n"); 1640 ret = -EINVAL; 1641 goto out_delete_filters; 1642 } 1643 1644 ret = target__validate(&ftrace.target); 1645 if (ret) { 1646 char errbuf[512]; 1647 1648 target__strerror(&ftrace.target, ret, errbuf, 512); 1649 pr_err("%s\n", errbuf); 1650 goto out_delete_filters; 1651 } 1652 1653 ftrace.evlist = evlist__new(); 1654 if (ftrace.evlist == NULL) { 1655 ret = -ENOMEM; 1656 goto out_delete_filters; 1657 } 1658 1659 ret = evlist__create_maps(ftrace.evlist, &ftrace.target); 1660 if (ret < 0) 1661 goto out_delete_evlist; 1662 1663 if (argc) { 1664 ret = evlist__prepare_workload(ftrace.evlist, &ftrace.target, 1665 argv, false, 1666 ftrace__workload_exec_failed_signal); 1667 if (ret < 0) 1668 goto out_delete_evlist; 1669 } 1670 1671 ret = cmd_func(&ftrace); 1672 1673 out_delete_evlist: 1674 evlist__delete(ftrace.evlist); 1675 1676 out_delete_filters: 1677 delete_filter_func(&ftrace.filters); 1678 delete_filter_func(&ftrace.notrace); 1679 delete_filter_func(&ftrace.graph_funcs); 1680 delete_filter_func(&ftrace.nograph_funcs); 1681 1682 return ret; 1683 } 1684