1 #include "annotate.h" 2 #include "util.h" 3 #include "build-id.h" 4 #include "hist.h" 5 #include "session.h" 6 #include "sort.h" 7 #include <math.h> 8 9 static bool hists__filter_entry_by_dso(struct hists *hists, 10 struct hist_entry *he); 11 static bool hists__filter_entry_by_thread(struct hists *hists, 12 struct hist_entry *he); 13 14 enum hist_filter { 15 HIST_FILTER__DSO, 16 HIST_FILTER__THREAD, 17 HIST_FILTER__PARENT, 18 }; 19 20 struct callchain_param callchain_param = { 21 .mode = CHAIN_GRAPH_REL, 22 .min_percent = 0.5, 23 .order = ORDER_CALLEE 24 }; 25 26 u16 hists__col_len(struct hists *hists, enum hist_column col) 27 { 28 return hists->col_len[col]; 29 } 30 31 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len) 32 { 33 hists->col_len[col] = len; 34 } 35 36 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) 37 { 38 if (len > hists__col_len(hists, col)) { 39 hists__set_col_len(hists, col, len); 40 return true; 41 } 42 return false; 43 } 44 45 static void hists__reset_col_len(struct hists *hists) 46 { 47 enum hist_column col; 48 49 for (col = 0; col < HISTC_NR_COLS; ++col) 50 hists__set_col_len(hists, col, 0); 51 } 52 53 static void hists__set_unres_dso_col_len(struct hists *hists, int dso) 54 { 55 const unsigned int unresolved_col_width = BITS_PER_LONG / 4; 56 57 if (hists__col_len(hists, dso) < unresolved_col_width && 58 !symbol_conf.col_width_list_str && !symbol_conf.field_sep && 59 !symbol_conf.dso_list) 60 hists__set_col_len(hists, dso, unresolved_col_width); 61 } 62 63 static void hists__calc_col_len(struct hists *hists, struct hist_entry *h) 64 { 65 const unsigned int unresolved_col_width = BITS_PER_LONG / 4; 66 u16 len; 67 68 if (h->ms.sym) 69 hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4); 70 else 71 hists__set_unres_dso_col_len(hists, HISTC_DSO); 72 73 len = thread__comm_len(h->thread); 74 if (hists__new_col_len(hists, HISTC_COMM, len)) 75 hists__set_col_len(hists, HISTC_THREAD, len + 6); 76 77 if (h->ms.map) { 78 len = dso__name_len(h->ms.map->dso); 79 hists__new_col_len(hists, HISTC_DSO, len); 80 } 81 82 if (h->branch_info) { 83 int symlen; 84 /* 85 * +4 accounts for '[x] ' priv level info 86 * +2 account of 0x prefix on raw addresses 87 */ 88 if (h->branch_info->from.sym) { 89 symlen = (int)h->branch_info->from.sym->namelen + 4; 90 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); 91 92 symlen = dso__name_len(h->branch_info->from.map->dso); 93 hists__new_col_len(hists, HISTC_DSO_FROM, symlen); 94 } else { 95 symlen = unresolved_col_width + 4 + 2; 96 hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); 97 hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); 98 } 99 100 if (h->branch_info->to.sym) { 101 symlen = (int)h->branch_info->to.sym->namelen + 4; 102 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); 103 104 symlen = dso__name_len(h->branch_info->to.map->dso); 105 hists__new_col_len(hists, HISTC_DSO_TO, symlen); 106 } else { 107 symlen = unresolved_col_width + 4 + 2; 108 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); 109 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); 110 } 111 } 112 } 113 114 static void hist_entry__add_cpumode_period(struct hist_entry *he, 115 unsigned int cpumode, u64 period) 116 { 117 switch (cpumode) { 118 case PERF_RECORD_MISC_KERNEL: 119 he->period_sys += period; 120 break; 121 case PERF_RECORD_MISC_USER: 122 he->period_us += period; 123 break; 124 case PERF_RECORD_MISC_GUEST_KERNEL: 125 he->period_guest_sys += period; 126 break; 127 case PERF_RECORD_MISC_GUEST_USER: 128 he->period_guest_us += period; 129 break; 130 default: 131 break; 132 } 133 } 134 135 static void hist_entry__decay(struct hist_entry *he) 136 { 137 he->period = (he->period * 7) / 8; 138 he->nr_events = (he->nr_events * 7) / 8; 139 } 140 141 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) 142 { 143 u64 prev_period = he->period; 144 145 if (prev_period == 0) 146 return true; 147 148 hist_entry__decay(he); 149 150 if (!he->filtered) 151 hists->stats.total_period -= prev_period - he->period; 152 153 return he->period == 0; 154 } 155 156 static void __hists__decay_entries(struct hists *hists, bool zap_user, 157 bool zap_kernel, bool threaded) 158 { 159 struct rb_node *next = rb_first(&hists->entries); 160 struct hist_entry *n; 161 162 while (next) { 163 n = rb_entry(next, struct hist_entry, rb_node); 164 next = rb_next(&n->rb_node); 165 /* 166 * We may be annotating this, for instance, so keep it here in 167 * case some it gets new samples, we'll eventually free it when 168 * the user stops browsing and it agains gets fully decayed. 169 */ 170 if (((zap_user && n->level == '.') || 171 (zap_kernel && n->level != '.') || 172 hists__decay_entry(hists, n)) && 173 !n->used) { 174 rb_erase(&n->rb_node, &hists->entries); 175 176 if (sort__need_collapse || threaded) 177 rb_erase(&n->rb_node_in, &hists->entries_collapsed); 178 179 hist_entry__free(n); 180 --hists->nr_entries; 181 } 182 } 183 } 184 185 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) 186 { 187 return __hists__decay_entries(hists, zap_user, zap_kernel, false); 188 } 189 190 void hists__decay_entries_threaded(struct hists *hists, 191 bool zap_user, bool zap_kernel) 192 { 193 return __hists__decay_entries(hists, zap_user, zap_kernel, true); 194 } 195 196 /* 197 * histogram, sorted on item, collects periods 198 */ 199 200 static struct hist_entry *hist_entry__new(struct hist_entry *template) 201 { 202 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0; 203 struct hist_entry *he = malloc(sizeof(*he) + callchain_size); 204 205 if (he != NULL) { 206 *he = *template; 207 he->nr_events = 1; 208 if (he->ms.map) 209 he->ms.map->referenced = true; 210 if (symbol_conf.use_callchain) 211 callchain_init(he->callchain); 212 } 213 214 return he; 215 } 216 217 static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) 218 { 219 if (!h->filtered) { 220 hists__calc_col_len(hists, h); 221 ++hists->nr_entries; 222 hists->stats.total_period += h->period; 223 } 224 } 225 226 static u8 symbol__parent_filter(const struct symbol *parent) 227 { 228 if (symbol_conf.exclude_other && parent == NULL) 229 return 1 << HIST_FILTER__PARENT; 230 return 0; 231 } 232 233 static struct hist_entry *add_hist_entry(struct hists *hists, 234 struct hist_entry *entry, 235 struct addr_location *al, 236 u64 period) 237 { 238 struct rb_node **p; 239 struct rb_node *parent = NULL; 240 struct hist_entry *he; 241 int cmp; 242 243 pthread_mutex_lock(&hists->lock); 244 245 p = &hists->entries_in->rb_node; 246 247 while (*p != NULL) { 248 parent = *p; 249 he = rb_entry(parent, struct hist_entry, rb_node_in); 250 251 cmp = hist_entry__cmp(entry, he); 252 253 if (!cmp) { 254 he->period += period; 255 ++he->nr_events; 256 goto out; 257 } 258 259 if (cmp < 0) 260 p = &(*p)->rb_left; 261 else 262 p = &(*p)->rb_right; 263 } 264 265 he = hist_entry__new(entry); 266 if (!he) 267 goto out_unlock; 268 269 rb_link_node(&he->rb_node_in, parent, p); 270 rb_insert_color(&he->rb_node_in, hists->entries_in); 271 out: 272 hist_entry__add_cpumode_period(he, al->cpumode, period); 273 out_unlock: 274 pthread_mutex_unlock(&hists->lock); 275 return he; 276 } 277 278 struct hist_entry *__hists__add_branch_entry(struct hists *self, 279 struct addr_location *al, 280 struct symbol *sym_parent, 281 struct branch_info *bi, 282 u64 period) 283 { 284 struct hist_entry entry = { 285 .thread = al->thread, 286 .ms = { 287 .map = bi->to.map, 288 .sym = bi->to.sym, 289 }, 290 .cpu = al->cpu, 291 .ip = bi->to.addr, 292 .level = al->level, 293 .period = period, 294 .parent = sym_parent, 295 .filtered = symbol__parent_filter(sym_parent), 296 .branch_info = bi, 297 }; 298 299 return add_hist_entry(self, &entry, al, period); 300 } 301 302 struct hist_entry *__hists__add_entry(struct hists *self, 303 struct addr_location *al, 304 struct symbol *sym_parent, u64 period) 305 { 306 struct hist_entry entry = { 307 .thread = al->thread, 308 .ms = { 309 .map = al->map, 310 .sym = al->sym, 311 }, 312 .cpu = al->cpu, 313 .ip = al->addr, 314 .level = al->level, 315 .period = period, 316 .parent = sym_parent, 317 .filtered = symbol__parent_filter(sym_parent), 318 }; 319 320 return add_hist_entry(self, &entry, al, period); 321 } 322 323 int64_t 324 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) 325 { 326 struct sort_entry *se; 327 int64_t cmp = 0; 328 329 list_for_each_entry(se, &hist_entry__sort_list, list) { 330 cmp = se->se_cmp(left, right); 331 if (cmp) 332 break; 333 } 334 335 return cmp; 336 } 337 338 int64_t 339 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) 340 { 341 struct sort_entry *se; 342 int64_t cmp = 0; 343 344 list_for_each_entry(se, &hist_entry__sort_list, list) { 345 int64_t (*f)(struct hist_entry *, struct hist_entry *); 346 347 f = se->se_collapse ?: se->se_cmp; 348 349 cmp = f(left, right); 350 if (cmp) 351 break; 352 } 353 354 return cmp; 355 } 356 357 void hist_entry__free(struct hist_entry *he) 358 { 359 free(he); 360 } 361 362 /* 363 * collapse the histogram 364 */ 365 366 static bool hists__collapse_insert_entry(struct hists *hists, 367 struct rb_root *root, 368 struct hist_entry *he) 369 { 370 struct rb_node **p = &root->rb_node; 371 struct rb_node *parent = NULL; 372 struct hist_entry *iter; 373 int64_t cmp; 374 375 while (*p != NULL) { 376 parent = *p; 377 iter = rb_entry(parent, struct hist_entry, rb_node_in); 378 379 cmp = hist_entry__collapse(iter, he); 380 381 if (!cmp) { 382 iter->period += he->period; 383 iter->nr_events += he->nr_events; 384 if (symbol_conf.use_callchain) { 385 callchain_cursor_reset(&hists->callchain_cursor); 386 callchain_merge(&hists->callchain_cursor, iter->callchain, 387 he->callchain); 388 } 389 hist_entry__free(he); 390 return false; 391 } 392 393 if (cmp < 0) 394 p = &(*p)->rb_left; 395 else 396 p = &(*p)->rb_right; 397 } 398 399 rb_link_node(&he->rb_node_in, parent, p); 400 rb_insert_color(&he->rb_node_in, root); 401 return true; 402 } 403 404 static struct rb_root *hists__get_rotate_entries_in(struct hists *hists) 405 { 406 struct rb_root *root; 407 408 pthread_mutex_lock(&hists->lock); 409 410 root = hists->entries_in; 411 if (++hists->entries_in > &hists->entries_in_array[1]) 412 hists->entries_in = &hists->entries_in_array[0]; 413 414 pthread_mutex_unlock(&hists->lock); 415 416 return root; 417 } 418 419 static void hists__apply_filters(struct hists *hists, struct hist_entry *he) 420 { 421 hists__filter_entry_by_dso(hists, he); 422 hists__filter_entry_by_thread(hists, he); 423 } 424 425 static void __hists__collapse_resort(struct hists *hists, bool threaded) 426 { 427 struct rb_root *root; 428 struct rb_node *next; 429 struct hist_entry *n; 430 431 if (!sort__need_collapse && !threaded) 432 return; 433 434 root = hists__get_rotate_entries_in(hists); 435 next = rb_first(root); 436 437 while (next) { 438 n = rb_entry(next, struct hist_entry, rb_node_in); 439 next = rb_next(&n->rb_node_in); 440 441 rb_erase(&n->rb_node_in, root); 442 if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) { 443 /* 444 * If it wasn't combined with one of the entries already 445 * collapsed, we need to apply the filters that may have 446 * been set by, say, the hist_browser. 447 */ 448 hists__apply_filters(hists, n); 449 } 450 } 451 } 452 453 void hists__collapse_resort(struct hists *hists) 454 { 455 return __hists__collapse_resort(hists, false); 456 } 457 458 void hists__collapse_resort_threaded(struct hists *hists) 459 { 460 return __hists__collapse_resort(hists, true); 461 } 462 463 /* 464 * reverse the map, sort on period. 465 */ 466 467 static void __hists__insert_output_entry(struct rb_root *entries, 468 struct hist_entry *he, 469 u64 min_callchain_hits) 470 { 471 struct rb_node **p = &entries->rb_node; 472 struct rb_node *parent = NULL; 473 struct hist_entry *iter; 474 475 if (symbol_conf.use_callchain) 476 callchain_param.sort(&he->sorted_chain, he->callchain, 477 min_callchain_hits, &callchain_param); 478 479 while (*p != NULL) { 480 parent = *p; 481 iter = rb_entry(parent, struct hist_entry, rb_node); 482 483 if (he->period > iter->period) 484 p = &(*p)->rb_left; 485 else 486 p = &(*p)->rb_right; 487 } 488 489 rb_link_node(&he->rb_node, parent, p); 490 rb_insert_color(&he->rb_node, entries); 491 } 492 493 static void __hists__output_resort(struct hists *hists, bool threaded) 494 { 495 struct rb_root *root; 496 struct rb_node *next; 497 struct hist_entry *n; 498 u64 min_callchain_hits; 499 500 min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100); 501 502 if (sort__need_collapse || threaded) 503 root = &hists->entries_collapsed; 504 else 505 root = hists->entries_in; 506 507 next = rb_first(root); 508 hists->entries = RB_ROOT; 509 510 hists->nr_entries = 0; 511 hists->stats.total_period = 0; 512 hists__reset_col_len(hists); 513 514 while (next) { 515 n = rb_entry(next, struct hist_entry, rb_node_in); 516 next = rb_next(&n->rb_node_in); 517 518 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits); 519 hists__inc_nr_entries(hists, n); 520 } 521 } 522 523 void hists__output_resort(struct hists *hists) 524 { 525 return __hists__output_resort(hists, false); 526 } 527 528 void hists__output_resort_threaded(struct hists *hists) 529 { 530 return __hists__output_resort(hists, true); 531 } 532 533 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) 534 { 535 int i; 536 int ret = fprintf(fp, " "); 537 538 for (i = 0; i < left_margin; i++) 539 ret += fprintf(fp, " "); 540 541 return ret; 542 } 543 544 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, 545 int left_margin) 546 { 547 int i; 548 size_t ret = callchain__fprintf_left_margin(fp, left_margin); 549 550 for (i = 0; i < depth; i++) 551 if (depth_mask & (1 << i)) 552 ret += fprintf(fp, "| "); 553 else 554 ret += fprintf(fp, " "); 555 556 ret += fprintf(fp, "\n"); 557 558 return ret; 559 } 560 561 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, 562 int depth, int depth_mask, int period, 563 u64 total_samples, u64 hits, 564 int left_margin) 565 { 566 int i; 567 size_t ret = 0; 568 569 ret += callchain__fprintf_left_margin(fp, left_margin); 570 for (i = 0; i < depth; i++) { 571 if (depth_mask & (1 << i)) 572 ret += fprintf(fp, "|"); 573 else 574 ret += fprintf(fp, " "); 575 if (!period && i == depth - 1) { 576 double percent; 577 578 percent = hits * 100.0 / total_samples; 579 ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); 580 } else 581 ret += fprintf(fp, "%s", " "); 582 } 583 if (chain->ms.sym) 584 ret += fprintf(fp, "%s\n", chain->ms.sym->name); 585 else 586 ret += fprintf(fp, "%p\n", (void *)(long)chain->ip); 587 588 return ret; 589 } 590 591 static struct symbol *rem_sq_bracket; 592 static struct callchain_list rem_hits; 593 594 static void init_rem_hits(void) 595 { 596 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); 597 if (!rem_sq_bracket) { 598 fprintf(stderr, "Not enough memory to display remaining hits\n"); 599 return; 600 } 601 602 strcpy(rem_sq_bracket->name, "[...]"); 603 rem_hits.ms.sym = rem_sq_bracket; 604 } 605 606 static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 607 u64 total_samples, int depth, 608 int depth_mask, int left_margin) 609 { 610 struct rb_node *node, *next; 611 struct callchain_node *child; 612 struct callchain_list *chain; 613 int new_depth_mask = depth_mask; 614 u64 new_total; 615 u64 remaining; 616 size_t ret = 0; 617 int i; 618 uint entries_printed = 0; 619 620 if (callchain_param.mode == CHAIN_GRAPH_REL) 621 new_total = self->children_hit; 622 else 623 new_total = total_samples; 624 625 remaining = new_total; 626 627 node = rb_first(&self->rb_root); 628 while (node) { 629 u64 cumul; 630 631 child = rb_entry(node, struct callchain_node, rb_node); 632 cumul = callchain_cumul_hits(child); 633 remaining -= cumul; 634 635 /* 636 * The depth mask manages the output of pipes that show 637 * the depth. We don't want to keep the pipes of the current 638 * level for the last child of this depth. 639 * Except if we have remaining filtered hits. They will 640 * supersede the last child 641 */ 642 next = rb_next(node); 643 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) 644 new_depth_mask &= ~(1 << (depth - 1)); 645 646 /* 647 * But we keep the older depth mask for the line separator 648 * to keep the level link until we reach the last child 649 */ 650 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, 651 left_margin); 652 i = 0; 653 list_for_each_entry(chain, &child->val, list) { 654 ret += ipchain__fprintf_graph(fp, chain, depth, 655 new_depth_mask, i++, 656 new_total, 657 cumul, 658 left_margin); 659 } 660 ret += __callchain__fprintf_graph(fp, child, new_total, 661 depth + 1, 662 new_depth_mask | (1 << depth), 663 left_margin); 664 node = next; 665 if (++entries_printed == callchain_param.print_limit) 666 break; 667 } 668 669 if (callchain_param.mode == CHAIN_GRAPH_REL && 670 remaining && remaining != new_total) { 671 672 if (!rem_sq_bracket) 673 return ret; 674 675 new_depth_mask &= ~(1 << (depth - 1)); 676 677 ret += ipchain__fprintf_graph(fp, &rem_hits, depth, 678 new_depth_mask, 0, new_total, 679 remaining, left_margin); 680 } 681 682 return ret; 683 } 684 685 static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 686 u64 total_samples, int left_margin) 687 { 688 struct callchain_list *chain; 689 bool printed = false; 690 int i = 0; 691 int ret = 0; 692 u32 entries_printed = 0; 693 694 list_for_each_entry(chain, &self->val, list) { 695 if (!i++ && sort__first_dimension == SORT_SYM) 696 continue; 697 698 if (!printed) { 699 ret += callchain__fprintf_left_margin(fp, left_margin); 700 ret += fprintf(fp, "|\n"); 701 ret += callchain__fprintf_left_margin(fp, left_margin); 702 ret += fprintf(fp, "---"); 703 704 left_margin += 3; 705 printed = true; 706 } else 707 ret += callchain__fprintf_left_margin(fp, left_margin); 708 709 if (chain->ms.sym) 710 ret += fprintf(fp, " %s\n", chain->ms.sym->name); 711 else 712 ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); 713 714 if (++entries_printed == callchain_param.print_limit) 715 break; 716 } 717 718 ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); 719 720 return ret; 721 } 722 723 static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self, 724 u64 total_samples) 725 { 726 struct callchain_list *chain; 727 size_t ret = 0; 728 729 if (!self) 730 return 0; 731 732 ret += callchain__fprintf_flat(fp, self->parent, total_samples); 733 734 735 list_for_each_entry(chain, &self->val, list) { 736 if (chain->ip >= PERF_CONTEXT_MAX) 737 continue; 738 if (chain->ms.sym) 739 ret += fprintf(fp, " %s\n", chain->ms.sym->name); 740 else 741 ret += fprintf(fp, " %p\n", 742 (void *)(long)chain->ip); 743 } 744 745 return ret; 746 } 747 748 static size_t hist_entry_callchain__fprintf(struct hist_entry *he, 749 u64 total_samples, int left_margin, 750 FILE *fp) 751 { 752 struct rb_node *rb_node; 753 struct callchain_node *chain; 754 size_t ret = 0; 755 u32 entries_printed = 0; 756 757 rb_node = rb_first(&he->sorted_chain); 758 while (rb_node) { 759 double percent; 760 761 chain = rb_entry(rb_node, struct callchain_node, rb_node); 762 percent = chain->hit * 100.0 / total_samples; 763 switch (callchain_param.mode) { 764 case CHAIN_FLAT: 765 ret += percent_color_fprintf(fp, " %6.2f%%\n", 766 percent); 767 ret += callchain__fprintf_flat(fp, chain, total_samples); 768 break; 769 case CHAIN_GRAPH_ABS: /* Falldown */ 770 case CHAIN_GRAPH_REL: 771 ret += callchain__fprintf_graph(fp, chain, total_samples, 772 left_margin); 773 case CHAIN_NONE: 774 default: 775 break; 776 } 777 ret += fprintf(fp, "\n"); 778 if (++entries_printed == callchain_param.print_limit) 779 break; 780 rb_node = rb_next(rb_node); 781 } 782 783 return ret; 784 } 785 786 void hists__output_recalc_col_len(struct hists *hists, int max_rows) 787 { 788 struct rb_node *next = rb_first(&hists->entries); 789 struct hist_entry *n; 790 int row = 0; 791 792 hists__reset_col_len(hists); 793 794 while (next && row++ < max_rows) { 795 n = rb_entry(next, struct hist_entry, rb_node); 796 if (!n->filtered) 797 hists__calc_col_len(hists, n); 798 next = rb_next(&n->rb_node); 799 } 800 } 801 802 static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s, 803 size_t size, struct hists *pair_hists, 804 bool show_displacement, long displacement, 805 bool color, u64 total_period) 806 { 807 u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; 808 u64 nr_events; 809 const char *sep = symbol_conf.field_sep; 810 int ret; 811 812 if (symbol_conf.exclude_other && !he->parent) 813 return 0; 814 815 if (pair_hists) { 816 period = he->pair ? he->pair->period : 0; 817 nr_events = he->pair ? he->pair->nr_events : 0; 818 total = pair_hists->stats.total_period; 819 period_sys = he->pair ? he->pair->period_sys : 0; 820 period_us = he->pair ? he->pair->period_us : 0; 821 period_guest_sys = he->pair ? he->pair->period_guest_sys : 0; 822 period_guest_us = he->pair ? he->pair->period_guest_us : 0; 823 } else { 824 period = he->period; 825 nr_events = he->nr_events; 826 total = total_period; 827 period_sys = he->period_sys; 828 period_us = he->period_us; 829 period_guest_sys = he->period_guest_sys; 830 period_guest_us = he->period_guest_us; 831 } 832 833 if (total) { 834 if (color) 835 ret = percent_color_snprintf(s, size, 836 sep ? "%.2f" : " %6.2f%%", 837 (period * 100.0) / total); 838 else 839 ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%", 840 (period * 100.0) / total); 841 if (symbol_conf.show_cpu_utilization) { 842 ret += percent_color_snprintf(s + ret, size - ret, 843 sep ? "%.2f" : " %6.2f%%", 844 (period_sys * 100.0) / total); 845 ret += percent_color_snprintf(s + ret, size - ret, 846 sep ? "%.2f" : " %6.2f%%", 847 (period_us * 100.0) / total); 848 if (perf_guest) { 849 ret += percent_color_snprintf(s + ret, 850 size - ret, 851 sep ? "%.2f" : " %6.2f%%", 852 (period_guest_sys * 100.0) / 853 total); 854 ret += percent_color_snprintf(s + ret, 855 size - ret, 856 sep ? "%.2f" : " %6.2f%%", 857 (period_guest_us * 100.0) / 858 total); 859 } 860 } 861 } else 862 ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period); 863 864 if (symbol_conf.show_nr_samples) { 865 if (sep) 866 ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events); 867 else 868 ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events); 869 } 870 871 if (symbol_conf.show_total_period) { 872 if (sep) 873 ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); 874 else 875 ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period); 876 } 877 878 if (pair_hists) { 879 char bf[32]; 880 double old_percent = 0, new_percent = 0, diff; 881 882 if (total > 0) 883 old_percent = (period * 100.0) / total; 884 if (total_period > 0) 885 new_percent = (he->period * 100.0) / total_period; 886 887 diff = new_percent - old_percent; 888 889 if (fabs(diff) >= 0.01) 890 ret += scnprintf(bf, sizeof(bf), "%+4.2F%%", diff); 891 else 892 ret += scnprintf(bf, sizeof(bf), " "); 893 894 if (sep) 895 ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf); 896 else 897 ret += scnprintf(s + ret, size - ret, "%11.11s", bf); 898 899 if (show_displacement) { 900 if (displacement) 901 ret += scnprintf(bf, sizeof(bf), "%+4ld", displacement); 902 else 903 ret += scnprintf(bf, sizeof(bf), " "); 904 905 if (sep) 906 ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf); 907 else 908 ret += scnprintf(s + ret, size - ret, "%6.6s", bf); 909 } 910 } 911 912 return ret; 913 } 914 915 int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size, 916 struct hists *hists) 917 { 918 const char *sep = symbol_conf.field_sep; 919 struct sort_entry *se; 920 int ret = 0; 921 922 list_for_each_entry(se, &hist_entry__sort_list, list) { 923 if (se->elide) 924 continue; 925 926 ret += scnprintf(s + ret, size - ret, "%s", sep ?: " "); 927 ret += se->se_snprintf(he, s + ret, size - ret, 928 hists__col_len(hists, se->se_width_idx)); 929 } 930 931 return ret; 932 } 933 934 static int hist_entry__fprintf(struct hist_entry *he, size_t size, 935 struct hists *hists, struct hists *pair_hists, 936 bool show_displacement, long displacement, 937 u64 total_period, FILE *fp) 938 { 939 char bf[512]; 940 int ret; 941 942 if (size == 0 || size > sizeof(bf)) 943 size = sizeof(bf); 944 945 ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists, 946 show_displacement, displacement, 947 true, total_period); 948 hist_entry__snprintf(he, bf + ret, size - ret, hists); 949 return fprintf(fp, "%s\n", bf); 950 } 951 952 static size_t hist_entry__fprintf_callchain(struct hist_entry *he, 953 struct hists *hists, 954 u64 total_period, FILE *fp) 955 { 956 int left_margin = 0; 957 958 if (sort__first_dimension == SORT_COMM) { 959 struct sort_entry *se = list_first_entry(&hist_entry__sort_list, 960 typeof(*se), list); 961 left_margin = hists__col_len(hists, se->se_width_idx); 962 left_margin -= thread__comm_len(he->thread); 963 } 964 965 return hist_entry_callchain__fprintf(he, total_period, left_margin, fp); 966 } 967 968 size_t hists__fprintf(struct hists *hists, struct hists *pair, 969 bool show_displacement, bool show_header, int max_rows, 970 int max_cols, FILE *fp) 971 { 972 struct sort_entry *se; 973 struct rb_node *nd; 974 size_t ret = 0; 975 u64 total_period; 976 unsigned long position = 1; 977 long displacement = 0; 978 unsigned int width; 979 const char *sep = symbol_conf.field_sep; 980 const char *col_width = symbol_conf.col_width_list_str; 981 int nr_rows = 0; 982 983 init_rem_hits(); 984 985 if (!show_header) 986 goto print_entries; 987 988 fprintf(fp, "# %s", pair ? "Baseline" : "Overhead"); 989 990 if (symbol_conf.show_cpu_utilization) { 991 if (sep) { 992 ret += fprintf(fp, "%csys", *sep); 993 ret += fprintf(fp, "%cus", *sep); 994 if (perf_guest) { 995 ret += fprintf(fp, "%cguest sys", *sep); 996 ret += fprintf(fp, "%cguest us", *sep); 997 } 998 } else { 999 ret += fprintf(fp, " sys "); 1000 ret += fprintf(fp, " us "); 1001 if (perf_guest) { 1002 ret += fprintf(fp, " guest sys "); 1003 ret += fprintf(fp, " guest us "); 1004 } 1005 } 1006 } 1007 1008 if (symbol_conf.show_nr_samples) { 1009 if (sep) 1010 fprintf(fp, "%cSamples", *sep); 1011 else 1012 fputs(" Samples ", fp); 1013 } 1014 1015 if (symbol_conf.show_total_period) { 1016 if (sep) 1017 ret += fprintf(fp, "%cPeriod", *sep); 1018 else 1019 ret += fprintf(fp, " Period "); 1020 } 1021 1022 if (pair) { 1023 if (sep) 1024 ret += fprintf(fp, "%cDelta", *sep); 1025 else 1026 ret += fprintf(fp, " Delta "); 1027 1028 if (show_displacement) { 1029 if (sep) 1030 ret += fprintf(fp, "%cDisplacement", *sep); 1031 else 1032 ret += fprintf(fp, " Displ"); 1033 } 1034 } 1035 1036 list_for_each_entry(se, &hist_entry__sort_list, list) { 1037 if (se->elide) 1038 continue; 1039 if (sep) { 1040 fprintf(fp, "%c%s", *sep, se->se_header); 1041 continue; 1042 } 1043 width = strlen(se->se_header); 1044 if (symbol_conf.col_width_list_str) { 1045 if (col_width) { 1046 hists__set_col_len(hists, se->se_width_idx, 1047 atoi(col_width)); 1048 col_width = strchr(col_width, ','); 1049 if (col_width) 1050 ++col_width; 1051 } 1052 } 1053 if (!hists__new_col_len(hists, se->se_width_idx, width)) 1054 width = hists__col_len(hists, se->se_width_idx); 1055 fprintf(fp, " %*s", width, se->se_header); 1056 } 1057 1058 fprintf(fp, "\n"); 1059 if (max_rows && ++nr_rows >= max_rows) 1060 goto out; 1061 1062 if (sep) 1063 goto print_entries; 1064 1065 fprintf(fp, "# ........"); 1066 if (symbol_conf.show_cpu_utilization) 1067 fprintf(fp, " ....... ......."); 1068 if (symbol_conf.show_nr_samples) 1069 fprintf(fp, " .........."); 1070 if (symbol_conf.show_total_period) 1071 fprintf(fp, " ............"); 1072 if (pair) { 1073 fprintf(fp, " .........."); 1074 if (show_displacement) 1075 fprintf(fp, " ....."); 1076 } 1077 list_for_each_entry(se, &hist_entry__sort_list, list) { 1078 unsigned int i; 1079 1080 if (se->elide) 1081 continue; 1082 1083 fprintf(fp, " "); 1084 width = hists__col_len(hists, se->se_width_idx); 1085 if (width == 0) 1086 width = strlen(se->se_header); 1087 for (i = 0; i < width; i++) 1088 fprintf(fp, "."); 1089 } 1090 1091 fprintf(fp, "\n"); 1092 if (max_rows && ++nr_rows >= max_rows) 1093 goto out; 1094 1095 fprintf(fp, "#\n"); 1096 if (max_rows && ++nr_rows >= max_rows) 1097 goto out; 1098 1099 print_entries: 1100 total_period = hists->stats.total_period; 1101 1102 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 1103 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 1104 1105 if (h->filtered) 1106 continue; 1107 1108 if (show_displacement) { 1109 if (h->pair != NULL) 1110 displacement = ((long)h->pair->position - 1111 (long)position); 1112 else 1113 displacement = 0; 1114 ++position; 1115 } 1116 ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement, 1117 displacement, total_period, fp); 1118 1119 if (symbol_conf.use_callchain) 1120 ret += hist_entry__fprintf_callchain(h, hists, total_period, fp); 1121 if (max_rows && ++nr_rows >= max_rows) 1122 goto out; 1123 1124 if (h->ms.map == NULL && verbose > 1) { 1125 __map_groups__fprintf_maps(&h->thread->mg, 1126 MAP__FUNCTION, verbose, fp); 1127 fprintf(fp, "%.10s end\n", graph_dotted_line); 1128 } 1129 } 1130 out: 1131 free(rem_sq_bracket); 1132 1133 return ret; 1134 } 1135 1136 /* 1137 * See hists__fprintf to match the column widths 1138 */ 1139 unsigned int hists__sort_list_width(struct hists *hists) 1140 { 1141 struct sort_entry *se; 1142 int ret = 9; /* total % */ 1143 1144 if (symbol_conf.show_cpu_utilization) { 1145 ret += 7; /* count_sys % */ 1146 ret += 6; /* count_us % */ 1147 if (perf_guest) { 1148 ret += 13; /* count_guest_sys % */ 1149 ret += 12; /* count_guest_us % */ 1150 } 1151 } 1152 1153 if (symbol_conf.show_nr_samples) 1154 ret += 11; 1155 1156 if (symbol_conf.show_total_period) 1157 ret += 13; 1158 1159 list_for_each_entry(se, &hist_entry__sort_list, list) 1160 if (!se->elide) 1161 ret += 2 + hists__col_len(hists, se->se_width_idx); 1162 1163 if (verbose) /* Addr + origin */ 1164 ret += 3 + BITS_PER_LONG / 4; 1165 1166 return ret; 1167 } 1168 1169 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, 1170 enum hist_filter filter) 1171 { 1172 h->filtered &= ~(1 << filter); 1173 if (h->filtered) 1174 return; 1175 1176 ++hists->nr_entries; 1177 if (h->ms.unfolded) 1178 hists->nr_entries += h->nr_rows; 1179 h->row_offset = 0; 1180 hists->stats.total_period += h->period; 1181 hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events; 1182 1183 hists__calc_col_len(hists, h); 1184 } 1185 1186 1187 static bool hists__filter_entry_by_dso(struct hists *hists, 1188 struct hist_entry *he) 1189 { 1190 if (hists->dso_filter != NULL && 1191 (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { 1192 he->filtered |= (1 << HIST_FILTER__DSO); 1193 return true; 1194 } 1195 1196 return false; 1197 } 1198 1199 void hists__filter_by_dso(struct hists *hists) 1200 { 1201 struct rb_node *nd; 1202 1203 hists->nr_entries = hists->stats.total_period = 0; 1204 hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; 1205 hists__reset_col_len(hists); 1206 1207 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 1208 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 1209 1210 if (symbol_conf.exclude_other && !h->parent) 1211 continue; 1212 1213 if (hists__filter_entry_by_dso(hists, h)) 1214 continue; 1215 1216 hists__remove_entry_filter(hists, h, HIST_FILTER__DSO); 1217 } 1218 } 1219 1220 static bool hists__filter_entry_by_thread(struct hists *hists, 1221 struct hist_entry *he) 1222 { 1223 if (hists->thread_filter != NULL && 1224 he->thread != hists->thread_filter) { 1225 he->filtered |= (1 << HIST_FILTER__THREAD); 1226 return true; 1227 } 1228 1229 return false; 1230 } 1231 1232 void hists__filter_by_thread(struct hists *hists) 1233 { 1234 struct rb_node *nd; 1235 1236 hists->nr_entries = hists->stats.total_period = 0; 1237 hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; 1238 hists__reset_col_len(hists); 1239 1240 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 1241 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 1242 1243 if (hists__filter_entry_by_thread(hists, h)) 1244 continue; 1245 1246 hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD); 1247 } 1248 } 1249 1250 int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) 1251 { 1252 return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); 1253 } 1254 1255 int hist_entry__annotate(struct hist_entry *he, size_t privsize) 1256 { 1257 return symbol__annotate(he->ms.sym, he->ms.map, privsize); 1258 } 1259 1260 void hists__inc_nr_events(struct hists *hists, u32 type) 1261 { 1262 ++hists->stats.nr_events[0]; 1263 ++hists->stats.nr_events[type]; 1264 } 1265 1266 size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) 1267 { 1268 int i; 1269 size_t ret = 0; 1270 1271 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { 1272 const char *name; 1273 1274 if (hists->stats.nr_events[i] == 0) 1275 continue; 1276 1277 name = perf_event__name(i); 1278 if (!strcmp(name, "UNKNOWN")) 1279 continue; 1280 1281 ret += fprintf(fp, "%16s events: %10d\n", name, 1282 hists->stats.nr_events[i]); 1283 } 1284 1285 return ret; 1286 } 1287