#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order	= ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

static void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->branch_info) {
		int symlen;
		/*
		 * +4 accounts for the '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->period_guest_us += period;
		break;
	default:
		break;
	}
}
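
/*
 * Decay is used by the 'live' browsing modes (e.g. perf top): each pass
 * below scales an entry's accumulated period and event count by 7/8, so
 * an entry that stops receiving samples eventually decays to zero and
 * can be pruned from the tree.
 */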
static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->period;

	return he->period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;
		he->nr_events = 1;
		if (he->ms.map)
			he->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);
	}

	return he;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
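
/*
 * add_hist_entry() walks the sort-keyed rbtree looking for an entry that
 * compares equal under hist_entry__cmp(); on a hit it just aggregates the
 * period and event count, otherwise it allocates and links a new node.
 */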
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
	};

	return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __used,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->nr_events += he->nr_events;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
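
/*
 * hists->entries_in points into a two-slot array: rotating it hands the
 * current input tree to the resort path while new samples keep landing
 * in the other slot. The hists->lock mutex guards the switch.
 */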
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}
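
/*
 * A typical report-style consumer drives the phases above roughly like
 * this (a sketch only; the real call sites live in the perf builtins):
 *
 *	__hists__add_entry(hists, &al, parent, sample->period);
 *	...
 *	hists__collapse_resort(hists);
 *	hists__output_resort(hists);
 *	hists__fprintf(hists, NULL, false, true, 0, 0, stdout);
 */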
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	/* +6 leaves room for "[...]" plus the NUL in symbol->name[] */
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
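
/*
 * Recursive renderer for the callchain graph: depth_mask tracks, per
 * column, whether a '|' link is still needed for an ancestor level, and
 * in CHAIN_GRAPH_REL mode the hits filtered out below min_percent are
 * summed into the synthetic "[...]" entry set up by init_rem_hits().
 */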
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      cumul,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      remaining, left_margin);
	}

	return ret;
}
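
/*
 * In CHAIN_GRAPH_REL mode each subtree is printed relative to its own
 * parent (new_total = child->children_hit above), whereas CHAIN_GRAPH_ABS
 * keeps percentages relative to the full sample count all the way down.
 */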
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;

	/*
	 * If there is a single callchain root, don't bother printing
	 * its percentage (100% in fractal mode, and the same percentage
	 * as the hist entry in graph mode). This also avoids one level
	 * of column.
	 */
	node = rb_first(root);
	if (node && !rb_next(node)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same
			 * as the symbol; no need to print it, otherwise it
			 * appears displayed twice.
			 */
			if (!i++ && sort__first_dimension == SORT_SYM)
				continue;
			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			if (chain->ms.sym)
				ret += fprintf(fp, " %s\n", chain->ms.sym->name);
			else
				ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	return __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
}

static size_t __callchain__fprintf_flat(FILE *fp,
					struct callchain_node *self,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += __callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct rb_node *rb_node;
	struct callchain_node *chain;

	rb_node = rb_first(self);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;

		ret += percent_color_fprintf(fp, " %6.2f%%\n", percent);
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
						left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
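
/*
 * Format the leading numeric columns of one row: the overhead percentage
 * (taken from the pair histogram when comparing two sessions), the
 * optional CPU-utilization breakdown, sample/period counts, and the
 * delta plus displacement columns used by perf diff style output.
 */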
static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
				     size_t size, struct hists *pair_hists,
				     bool show_displacement, long displacement,
				     bool color, u64 total_period)
{
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	if (pair_hists) {
		period = he->pair ? he->pair->period : 0;
		nr_events = he->pair ? he->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = he->pair ? he->pair->period_sys : 0;
		period_us = he->pair ? he->pair->period_us : 0;
		period_guest_sys = he->pair ? he->pair->period_guest_sys : 0;
		period_guest_us = he->pair ? he->pair->period_guest_us : 0;
	} else {
		period = he->period;
		nr_events = he->nr_events;
		total = total_period;
		period_sys = he->period_sys;
		period_us = he->period_us;
		period_guest_sys = he->period_guest_sys;
		period_guest_us = he->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%",
					(period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_sys * 100.0) /
							      total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_us * 100.0) /
							      total);
			}
		}
	} else
		ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (total_period > 0)
			new_percent = (he->period * 100.0) / total_period;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			scnprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += scnprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				scnprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				scnprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	return ret;
}

int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
			 struct hists *hists)
{
	const char *sep = symbol_conf.field_sep;
	struct sort_entry *se;
	int ret = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(he, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}
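
/*
 * hist_entry__fprintf() stages the whole row in a fixed 512-byte stack
 * buffer: the percent columns are rendered (with color escapes) by
 * hist_entry__pcnt_snprintf(), the sort columns are appended by
 * hist_entry__snprintf(), and a single fprintf() emits the line.
 */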
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists, struct hists *pair_hists,
			       bool show_displacement, long displacement,
			       u64 total_period, FILE *fp)
{
	char bf[512];
	int ret;

	if (size == 0 || size > sizeof(bf))
		size = sizeof(bf);

	ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
					show_displacement, displacement,
					true, total_period);
	hist_entry__snprintf(he, bf + ret, size - ret, hists);
	return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *he,
					    struct hists *hists,
					    u64 total_period, FILE *fp)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(he->thread);
	}

	return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}

size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header, int max_rows,
		      int max_cols, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	u64 total_period;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;

	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += fprintf(fp, "%cPeriod", *sep);
		else
			ret += fprintf(fp, " Period ");
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, " %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;
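
	/*
	 * With the default (non-separator) layout, underline each header
	 * with a dotted ruler sized to the column widths computed above.
	 */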
	fprintf(fp, "# ........");
	if (symbol_conf.show_cpu_utilization)
		fprintf(fp, " ....... .......");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (symbol_conf.show_total_period)
		fprintf(fp, " ............");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	total_period = hists->stats.total_period;

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
					   displacement, total_period, fp);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, hists, total_period, fp);
		if (max_rows && ++nr_rows >= max_rows)
			goto out;

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}
out:
	free(rem_sq_bracket);

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct sort_entry *se;
	int ret = 9;	/* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7;	/* count_sys % */
		ret += 6;	/* count_us % */
		if (perf_guest) {
			ret += 13;	/* count_guest_sys % */
			ret += 12;	/* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	if (symbol_conf.show_total_period)
		ret += 13;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(hists, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
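
/*
 * Each filter owns one bit in hist_entry->filtered: the hists__filter_by_*
 * walkers below set or clear their own bit per entry and rebuild the
 * aggregate stats, so an entry stays hidden until all filter bits are gone.
 */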
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	/* slot 0 is not a record type; it accumulates the overall total */
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}

size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (hists->stats.nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       hists->stats.nr_events[i]);
	}

	return ret;
}