/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/color.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"

#include "util/debug.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

static int		*fd[MAX_NR_CPUS][MAX_COUNTERS];

static bool		system_wide	= false;

static int		default_interval = 0;

static int		count_filter	= 5;
static int		print_entries;

static int		target_pid	= -1;
static int		target_tid	= -1;
static pid_t		*all_tids	= NULL;
static int		thread_num	= 0;
static bool		inherit		= false;
static int		profile_cpu	= -1;
static int		nr_cpus		= 0;
static int		realtime_prio	= 0;
static bool		group		= false;
static unsigned int	page_size;
static unsigned int	mmap_pages	= 16;
static int		freq		= 1000; /* 1 KHz */

static int		delay_secs	= 2;
static bool		zero		= false;
static bool		dump_symtab	= false;

static bool		hide_kernel_symbols = false;
static bool		hide_user_symbols = false;
static struct winsize	winsize;

/*
 * Source
 */

struct source_line {
	u64			eip;
	unsigned long		count[MAX_COUNTERS];
	char			*line;
	struct source_line	*next;
};

static const char	*sym_filter = NULL;
struct sym_entry	*sym_filter_entry = NULL;
struct sym_entry	*sym_filter_entry_sched = NULL;
static int		sym_pcnt_filter = 5;
static int		sym_counter = 0;
static int		display_weighted = -1;

/*
 * Symbols
 */

struct sym_entry_source {
	struct source_line	*source;
	struct source_line	*lines;
	struct source_line	**lines_tail;
	pthread_mutex_t		lock;
};

struct sym_entry {
	struct rb_node		rb_node;
	struct list_head	node;
	unsigned long		snap_count;
	double			weight;
	int			skip;
	u16			name_len;
	u8			origin;
	struct map		*map;
	struct sym_entry_source	*src;
	unsigned long		count[0];
};

/*
 * Source functions
 */

static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
{
	return ((void *)self) + symbol_conf.priv_size;
}

void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}
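
/*
 * Fit the number of displayed entries to the terminal height, leaving
 * room for the header lines printed above the symbol table.
 */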
static void update_print_entries(struct winsize *ws)
{
	print_entries = ws->ws_row;

	if (print_entries > 9)
		print_entries -= 9;
}

static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}

static int parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct sym_entry_source *source;
	struct map *map;
	FILE *file;
	char command[PATH_MAX*2];
	const char *path;
	u64 len;

	if (!syme)
		return -1;

	sym = sym_entry__symbol(syme);
	map = syme->map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->origin == DSO__ORIG_KERNEL)
		return -1;

	if (syme->src == NULL) {
		syme->src = zalloc(sizeof(*source));
		if (syme->src == NULL)
			return -1;
		pthread_mutex_init(&syme->src->lock, NULL);
	}

	source = syme->src;

	if (source->lines) {
		pthread_mutex_lock(&source->lock);
		goto out_assign;
	}
	path = map->dso->long_name;

	len = sym->end - sym->start;

	sprintf(command,
		"objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);

	file = popen(command, "r");
	if (!file)
		return -1;

	pthread_mutex_lock(&source->lock);
	source->lines_tail = &source->lines;
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c, *sep;

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
		memset(src, 0, sizeof(struct source_line));

		if (getline(&src->line, &dummy, file) < 0)
			break;
		if (!src->line)
			break;

		c = strchr(src->line, '\n');
		if (c)
			*c = 0;

		src->next = NULL;
		*source->lines_tail = src;
		source->lines_tail = &src->next;

		src->eip = strtoull(src->line, &sep, 16);
		if (*sep == ':')
			src->eip = map__objdump_2ip(map, src->eip);
		else /* this line has no ip info (e.g. source line) */
			src->eip = 0;
	}
	pclose(file);
out_assign:
	sym_filter_entry = syme;
	pthread_mutex_unlock(&source->lock);
	return 0;
}
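
/*
 * Per-source-line hit accounting for the annotated symbol: the counters
 * below are zeroed or decayed from the display side and bumped from the
 * sampling side, so accesses go through syme->src->lock.
 */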
static void __zero_source_counters(struct sym_entry *syme)
{
	int i;
	struct source_line *line;

	line = syme->src->lines;
	while (line) {
		for (i = 0; i < nr_counters; i++)
			line->count[i] = 0;
		line = line->next;
	}
}

static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct source_line *line;

	if (syme != sym_filter_entry)
		return;

	if (pthread_mutex_trylock(&syme->src->lock))
		return;

	if (syme->src == NULL || syme->src->source == NULL)
		goto out_unlock;

	for (line = syme->src->lines; line; line = line->next) {
		/* skip lines without IP info */
		if (line->eip == 0)
			continue;
		if (line->eip == ip) {
			line->count[counter]++;
			break;
		}
		if (line->eip > ip)
			break;
	}
out_unlock:
	pthread_mutex_unlock(&syme->src->lock);
}

#define PATTERN_LEN	(BITS_PER_LONG / 4 + 2)

static void lookup_sym_source(struct sym_entry *syme)
{
	struct symbol *symbol = sym_entry__symbol(syme);
	struct source_line *line;
	char pattern[PATTERN_LEN + 1];

	sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
		map__rip_2objdump(syme->map, symbol->start));

	pthread_mutex_lock(&syme->src->lock);
	for (line = syme->src->lines; line; line = line->next) {
		if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
			syme->src->source = line;
			break;
		}
	}
	pthread_mutex_unlock(&syme->src->lock);
}

static void show_lines(struct source_line *queue, int count, int total)
{
	int i;
	struct source_line *line;

	line = queue;
	for (i = 0; i < count; i++) {
		float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;

		printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
		line = line->next;
	}
}

#define TRACE_COUNT	3

static void show_details(struct sym_entry *syme)
{
	struct symbol *symbol;
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0, total = 0, more = 0;

	if (!syme)
		return;

	if (!syme->src->source)
		lookup_sym_source(syme);

	if (!syme->src->source)
		return;

	symbol = sym_entry__symbol(syme);
	printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
	printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);

	pthread_mutex_lock(&syme->src->lock);
	line = syme->src->source;
	while (line) {
		total += line->count[sym_counter];
		line = line->next;
	}

	line = syme->src->source;
	while (line) {
		float pcnt = 0.0;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count++;

		if (line->count[sym_counter])
			pcnt = 100.0 * line->count[sym_counter] / (float)total;
		if (pcnt >= (float)sym_pcnt_filter) {
			if (displayed <= print_entries)
				show_lines(line_queue, line_queue_count, total);
			else more++;
			displayed += line_queue_count;
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			line_queue = line_queue->next;
			line_queue_count--;
		}

		line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
		line = line->next;
	}
	pthread_mutex_unlock(&syme->src->lock);
	if (more)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}

/*
 * Symbols will be added here in event__process_sample and will get out
 * after decayed.
 */
static LIST_HEAD(active_symbols);
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight = sym->snap_count;
	int counter;

	if (!display_weighted)
		return weight;

	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}

static long		samples;
static long		kernel_samples, us_samples;
static long		exact_samples;
static long		guest_us_samples, guest_kernel_samples;
static const char	CONSOLE_CLEAR[] = "\033[H\033[2J";

static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &active_symbols);
}

static void list_remove_active_sym(struct sym_entry *syme)
{
	pthread_mutex_lock(&active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&active_symbols_lock);
}

static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}

static void print_sym_table(void)
{
	int printed = 0, j;
	int counter, snap = !display_weighted ? sym_counter : 0;
	float samples_per_sec = samples/delay_secs;
	float ksamples_per_sec = kernel_samples/delay_secs;
	float us_samples_per_sec = (us_samples)/delay_secs;
	float guest_kernel_samples_per_sec = (guest_kernel_samples)/delay_secs;
	float guest_us_samples_per_sec = (guest_us_samples)/delay_secs;
	float esamples_percent = (100.0*exact_samples)/samples;
	float sum_ksamples = 0.0;
	struct sym_entry *syme, *n;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *nd;
	int sym_width = 0, dso_width = 0, dso_short_width = 0;
	const int win_width = winsize.ws_col - 1;

	samples = us_samples = kernel_samples = exact_samples = 0;
	guest_kernel_samples = guest_us_samples = 0;

	/* Sort the active symbols */
	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {

			if ((hide_user_symbols &&
			     syme->origin == PERF_RECORD_MISC_USER) ||
			    (hide_kernel_symbols &&
			     syme->origin == PERF_RECORD_MISC_KERNEL)) {
				list_remove_active_sym(syme);
				continue;
			}
			syme->weight = sym_weight(syme);
			rb_insert_active_sym(&tmp, syme);
			sum_ksamples += syme->snap_count;

			for (j = 0; j < nr_counters; j++)
				syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
		} else
			list_remove_active_sym(syme);
	}

	puts(CONSOLE_CLEAR);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
	if (!perf_guest) {
		printf(" PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
			" exact: %4.1f%% [",
			samples_per_sec,
			100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
					  samples_per_sec)),
			esamples_percent);
	} else {
		printf(" PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
			" guest kernel:%4.1f%% guest us:%4.1f%%"
			" exact: %4.1f%% [",
			samples_per_sec,
			100.0 - (100.0 * ((samples_per_sec-ksamples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec-us_samples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec -
					   guest_kernel_samples_per_sec) /
					  samples_per_sec)),
			100.0 - (100.0 * ((samples_per_sec -
					   guest_us_samples_per_sec) /
					  samples_per_sec)),
			esamples_percent);
	}

	if (nr_counters == 1 || !display_weighted) {
		printf("%Ld", (u64)attrs[0].sample_period);
		if (freq)
			printf("Hz ");
		else
			printf(" ");
	}

	if (!display_weighted)
		printf("%s", event_name(sym_counter));
	else for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (target_pid != -1)
		printf(" (target_pid: %d", target_pid);
	else if (target_tid != -1)
		printf(" (target_tid: %d", target_tid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (target_tid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (sym_filter_entry) {
		show_details(sym_filter_entry);
		return;
	}

	/*
	 * Find the longest symbol name that will be displayed
	 */
	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		syme = rb_entry(nd, struct sym_entry, rb_node);
		if (++printed > print_entries ||
		    (int)syme->snap_count < count_filter)
			continue;

		if (syme->map->dso->long_name_len > dso_width)
			dso_width = syme->map->dso->long_name_len;

		if (syme->map->dso->short_name_len > dso_short_width)
			dso_short_width = syme->map->dso->short_name_len;

		if (syme->name_len > sym_width)
			sym_width = syme->name_len;
	}

	printed = 0;

	if (sym_width + dso_width > winsize.ws_col - 29) {
		dso_width = dso_short_width;
		if (sym_width + dso_width > winsize.ws_col - 29)
			sym_width = winsize.ws_col - dso_width - 29;
	}
	putchar('\n');
	if (nr_counters == 1)
		printf(" samples pcnt");
	else
		printf(" weight samples pcnt");

	if (verbose)
		printf(" RIP ");
	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
	printf(" %s _______ _____",
	       nr_counters == 1 ? " " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	printf(" %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);
		if (++printed > print_entries || (int)syme->snap_count < count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (nr_counters == 1 || !display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016llx", sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
				syme->map->dso->long_name :
				syme->map->dso->short_name);
	}
}
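
/*
 * Interactive input helpers for the keys handled in handle_keypress():
 * each reads a line from stdin and validates it before updating the
 * corresponding setting.
 */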
static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		pthread_mutex_lock(&syme->src->lock);
		__zero_source_counters(syme);
		*target = NULL;
		pthread_mutex_unlock(&syme->src->lock);
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&active_symbols_lock);
	syme = list_entry(active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
		struct symbol *sym = sym_entry__symbol(syme);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
		return;
	} else
		parse_source(found);

out_free:
	free(buf);
}

static void print_mapped_keys(void)
{
	char *name = NULL;

	if (sym_filter_entry) {
		struct symbol *sym = sym_entry__symbol(sym_filter_entry);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", delay_secs);
	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", print_entries);

	if (nr_counters > 1)
		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(sym_counter));

	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter);

	fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
	fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S] stop annotation.\n");

	if (nr_counters > 1)
		fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);

	fprintf(stdout,
		"\t[K] hide kernel_symbols symbols. \t(%s)\n",
		hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U] hide user symbols. \t(%s)\n",
		hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", zero ? 1 : 0);
	fprintf(stdout, "\t[qQ] quit.\n");
}

static int key_mapped(int c)
{
	switch (c) {
	case 'd':
	case 'e':
	case 'f':
	case 'z':
	case 'q':
	case 'Q':
	case 'K':
	case 'U':
	case 'F':
	case 's':
	case 'S':
		return 1;
	case 'E':
	case 'w':
		return nr_counters > 1 ? 1 : 0;
	default:
		break;
	}

	return 0;
}

static void handle_keypress(struct perf_session *session, int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
	case 'd':
		prompt_integer(&delay_secs, "Enter display delay");
		if (delay_secs < 1)
			delay_secs = 1;
		break;
	case 'e':
		prompt_integer(&print_entries, "Enter display entries (lines)");
		if (print_entries == 0) {
			sig_winch_handler(SIGWINCH);
			signal(SIGWINCH, sig_winch_handler);
		} else
			signal(SIGWINCH, SIG_DFL);
		break;
	case 'E':
		if (nr_counters > 1) {
			int i;

			fprintf(stderr, "\nAvailable events:");
			for (i = 0; i < nr_counters; i++)
				fprintf(stderr, "\n\t%d %s", i, event_name(i));

			prompt_integer(&sym_counter, "Enter details event counter");

			if (sym_counter >= nr_counters) {
				fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0));
				sym_counter = 0;
				sleep(1);
			}
		} else sym_counter = 0;
		break;
	case 'f':
		prompt_integer(&count_filter, "Enter display event count filter");
		break;
	case 'F':
		prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
		break;
	case 'K':
		hide_kernel_symbols = !hide_kernel_symbols;
		break;
	case 'q':
	case 'Q':
		printf("exiting.\n");
		if (dump_symtab)
			perf_session__fprintf_dsos(session, stderr);
		exit(0);
	case 's':
		prompt_symbol(&sym_filter_entry, "Enter details symbol");
		break;
	case 'S':
		if (!sym_filter_entry)
			break;
		else {
			struct sym_entry *syme = sym_filter_entry;

			pthread_mutex_lock(&syme->src->lock);
			sym_filter_entry = NULL;
			__zero_source_counters(syme);
			pthread_mutex_unlock(&syme->src->lock);
		}
		break;
	case 'U':
		hide_user_symbols = !hide_user_symbols;
		break;
	case 'w':
		display_weighted = ~display_weighted;
		break;
	case 'z':
		zero = !zero;
		break;
	default:
		break;
	}
}
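
/*
 * The display thread owns the terminal: it switches stdin to
 * non-canonical mode, redraws the symbol table every delay_secs seconds
 * and hands any keypress to handle_keypress().
 */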
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;
	struct perf_session *session = (struct perf_session *) arg;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return*/
	getc(stdin);

	do {
		print_sym_table();
	} while (!poll(&stdin_poll, 1, delay_msecs) == 1);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(session, c);
	goto repeat;

	return NULL;
}

/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = symbol__priv(sym);
	syme->map = map;
	syme->src = NULL;

	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
		/* schedule initial sym_filter_entry setup */
		sym_filter_entry_sched = syme;
		sym_filter = NULL;
	}

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	if (!syme->skip)
		syme->name_len = strlen(sym->name);

	return 0;
}
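
/*
 * Sample path: classify the sample by CPU mode, resolve it to a symbol
 * via the session's maps and bump that symbol's per-counter count; exact
 * IPs are additionally attributed to a source line when annotation is
 * active.
 */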
static void event__process_sample(const event_t *self,
				  struct perf_session *session, int counter)
{
	u64 ip = self->ip.ip;
	struct sym_entry *syme;
	struct addr_location al;
	struct machine *machine;
	u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	++samples;

	switch (origin) {
	case PERF_RECORD_MISC_USER:
		++us_samples;
		if (hide_user_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_KERNEL:
		++kernel_samples;
		if (hide_kernel_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++guest_kernel_samples;
		machine = perf_session__find_machine(session, self->ip.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++guest_us_samples;
		/*
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		 */
		return;
	default:
		return;
	}

	if (!machine && perf_guest) {
		pr_err("Can't find guest [%d]'s kernel information\n",
			self->ip.pid);
		return;
	}

	if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
		exact_samples++;

	if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (al.sym == NULL) {
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			pr_err("The %s file can't be used\n",
			       symbol_conf.vmlinux_name);
			exit(1);
		}

		return;
	}

	/* let's see, whether we need to install initial sym_filter_entry */
	if (sym_filter_entry_sched) {
		sym_filter_entry = sym_filter_entry_sched;
		sym_filter_entry_sched = NULL;
		if (parse_source(sym_filter_entry) < 0) {
			struct symbol *sym = sym_entry__symbol(sym_filter_entry);

			pr_err("Can't annotate %s", sym->name);
			if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
				pr_err(": No vmlinux file was found in the path:\n");
				machine__fprintf_vmlinux_path(machine, stderr);
			} else
				pr_err(".\n");
			exit(1);
		}
	}

	syme = symbol__priv(al.sym);
	if (!syme->skip) {
		syme->count[counter]++;
		syme->origin = origin;
		record_precise_ip(syme, counter, ip);
		pthread_mutex_lock(&active_symbols_lock);
		if (list_empty(&syme->node) || !syme->node.next)
			__list_insert_active_sym(syme);
		pthread_mutex_unlock(&active_symbols_lock);
	}
}

static int event__process(event_t *event, struct perf_session *session)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		event__process_comm(event, session);
		break;
	case PERF_RECORD_MMAP:
		event__process_mmap(event, session);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		event__process_task(event, session);
		break;
	default:
		break;
	}

	return 0;
}

struct mmap_data {
	int		counter;
	void		*base;
	int		mask;
	unsigned int	prev;
};

static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();

	return head;
}
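
/*
 * Drain one mmap'ed ring buffer: consume events between the previously
 * seen tail and the current data_head, copying out any event that wraps
 * around the end of the buffer.
 */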
static void perf_session__mmap_read_counter(struct perf_session *self,
					    struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	for (; old != head;) {
		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		if (event->header.type == PERF_RECORD_SAMPLE)
			event__process_sample(event, self, md->counter);
		else
			event__process(event, self);
		old += size;
	}

	md->prev = old;
}

static struct pollfd *event_array;
static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS];

static void perf_session__mmap_read(struct perf_session *self)
{
	int i, counter, thread_index;

	for (i = 0; i < nr_cpus; i++) {
		for (counter = 0; counter < nr_counters; counter++)
			for (thread_index = 0;
			     thread_index < thread_num;
			     thread_index++) {
				perf_session__mmap_read_counter(self,
					&mmap_array[i][counter][thread_index]);
			}
	}
}

int nr_poll;
int group_fd;
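
/*
 * Open one event per monitored thread on this CPU (falling back to the
 * software cpu-clock event when hardware cycles cannot be opened), add
 * the fd to the poll array and mmap its sampling buffer.
 */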
static void start_counter(int i, int counter)
{
	struct perf_event_attr *attr;
	int cpu;
	int thread_index;

	cpu = profile_cpu;
	if (target_tid == -1 && profile_cpu == -1)
		cpu = cpumap[i];

	attr = attrs + counter;

	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (freq) {
		attr->sample_type |= PERF_SAMPLE_PERIOD;
		attr->freq = 1;
		attr->sample_freq = freq;
	}

	attr->inherit = (cpu < 0) && inherit;
	attr->mmap = 1;

	for (thread_index = 0; thread_index < thread_num; thread_index++) {
try_again:
		fd[i][counter][thread_index] = sys_perf_event_open(attr,
				all_tids[thread_index], cpu, group_fd, 0);

		if (fd[i][counter][thread_index] < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES)
				die("No permission - are you root?\n");
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
			    && attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					warning(" ... trying to fall back to cpu-clock-ticks\n");

				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
			printf("\n");
			error("perfcounter syscall returned with %d (%s)\n",
			      fd[i][counter][thread_index], strerror(err));
			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
			exit(-1);
		}
		assert(fd[i][counter][thread_index] >= 0);
		fcntl(fd[i][counter][thread_index], F_SETFL, O_NONBLOCK);

		/*
		 * First counter acts as the group leader:
		 */
		if (group && group_fd == -1)
			group_fd = fd[i][counter][thread_index];

		event_array[nr_poll].fd = fd[i][counter][thread_index];
		event_array[nr_poll].events = POLLIN;
		nr_poll++;

		mmap_array[i][counter][thread_index].counter = counter;
		mmap_array[i][counter][thread_index].prev = 0;
		mmap_array[i][counter][thread_index].mask = mmap_pages*page_size - 1;
		mmap_array[i][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
				PROT_READ, MAP_SHARED, fd[i][counter][thread_index], 0);
		if (mmap_array[i][counter][thread_index].base == MAP_FAILED)
			die("failed to mmap with %d (%s)\n", errno, strerror(errno));
	}
}

static int __cmd_top(void)
{
	pthread_t thread;
	int i, counter;
	int ret;
	/*
	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
	 */
	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false);
	if (session == NULL)
		return -ENOMEM;

	if (target_tid != -1)
		event__synthesize_thread(target_tid, event__process, session);
	else
		event__synthesize_threads(event__process, session);

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
		for (counter = 0; counter < nr_counters; counter++)
			start_counter(i, counter);
	}

	/* Wait for a minimal set of events before starting the snapshot */
	poll(&event_array[0], nr_poll, 100);

	perf_session__mmap_read(session);

	if (pthread_create(&thread, NULL, display_thread, session)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = samples;

		perf_session__mmap_read(session);

		if (hits == samples)
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}

static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
		    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		   "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
		    "hide user symbols"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_END()
};

int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	int counter;
	int i, j;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (target_pid != -1) {
		target_tid = target_pid;
		thread_num = find_all_tid(target_pid, &all_tids);
		if (thread_num <= 0) {
			fprintf(stderr, "Can't find all threads of pid %d\n",
				target_pid);
			usage_with_options(top_usage, options);
		}
	} else {
		all_tids = malloc(sizeof(pid_t));
		if (!all_tids)
			return -ENOMEM;

		all_tids[0] = target_tid;
		thread_num = 1;
	}

	for (i = 0; i < MAX_NR_CPUS; i++) {
		for (j = 0; j < MAX_COUNTERS; j++) {
			fd[i][j] = malloc(sizeof(int)*thread_num);
			mmap_array[i][j] = zalloc(
				sizeof(struct mmap_data)*thread_num);
			if (!fd[i][j] || !mmap_array[i][j])
				return -ENOMEM;
		}
	}
	event_array = malloc(
		sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
	if (!event_array)
		return -ENOMEM;

	/* CPU and PID are mutually exclusive */
	if (target_tid > 0 && profile_cpu != -1) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		profile_cpu = -1;
	}

	if (!nr_counters)
		nr_counters = 1;

	symbol_conf.priv_size = (sizeof(struct sym_entry) +
				 (nr_counters + 1) * sizeof(unsigned long));

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	if (delay_secs < 1)
		delay_secs = 1;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	/*
	 * Fill in the ones not specifically initialized via -c:
	 */
	for (counter = 0; counter < nr_counters; counter++) {
		if (attrs[counter].sample_period)
			continue;

		attrs[counter].sample_period = default_interval;
	}

	if (target_tid != -1 || profile_cpu != -1)
		nr_cpus = 1;
	else
		nr_cpus = read_cpu_map();

	get_term_dimensions(&winsize);
	if (print_entries == 0) {
		update_print_entries(&winsize);
		signal(SIGWINCH, sig_winch_handler);
	}

	return __cmd_top();
}