#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

/* Filter predicates, defined later in this file. */
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

/* Global callchain display defaults, consulted by the output/sort code. */
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE,
	.key	= CCKEY_FUNCTION
};

/* Current display width (in characters) of column @col. */
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

/* Unconditionally set the display width of column @col. */
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

/*
 * Widen column @col to @len if @len exceeds the current width.
 * Returns true iff the column was actually widened.
 */
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

/* Zero all column widths, typically before a recalculation pass. */
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

/*
 * Reserve enough width in DSO-ish column @dso to print an unresolved raw
 * address (BITS_PER_LONG/4 hex digits), unless the user pinned widths or
 * is filtering/separating fields explicitly.
 */
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

/*
 * Grow the column widths of @hists so that every field of entry @h fits:
 * symbol, comm/thread, DSO, parent, branch from/to, memory daddr and the
 * fixed-width mem/weight/transaction columns.  Widths only ever grow here
 * (via hists__new_col_len()); reset is done separately.
 */
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	/*
	 * NOTE(review): +6 presumably leaves room for the pid decoration on
	 * the thread column — confirm against the thread column formatter.
	 */
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			/* a resolved from.sym implies a from.map here */
			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			/* NOTE(review): this symlen store is dead here */
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	/* Fixed minimum widths for the mem/weight columns. */
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

/*
 * Recompute column widths over the sorted output tree, considering only
 * the first @max_rows unfiltered entries (what would actually be shown).
 */
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

/* Credit @period to the per-cpumode bucket matching the sample's origin. */
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		/* unknown cpumode: silently ignored */
		break;
	}
}

/* Account one more sample worth of @period/@weight into @he_stat. */
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{

	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) 208 { 209 dest->period += src->period; 210 dest->period_sys += src->period_sys; 211 dest->period_us += src->period_us; 212 dest->period_guest_sys += src->period_guest_sys; 213 dest->period_guest_us += src->period_guest_us; 214 dest->nr_events += src->nr_events; 215 dest->weight += src->weight; 216 } 217 218 static void he_stat__decay(struct he_stat *he_stat) 219 { 220 he_stat->period = (he_stat->period * 7) / 8; 221 he_stat->nr_events = (he_stat->nr_events * 7) / 8; 222 /* XXX need decay for weight too? */ 223 } 224 225 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) 226 { 227 u64 prev_period = he->stat.period; 228 u64 diff; 229 230 if (prev_period == 0) 231 return true; 232 233 he_stat__decay(&he->stat); 234 235 diff = prev_period - he->stat.period; 236 237 hists->stats.total_period -= diff; 238 if (!he->filtered) 239 hists->stats.total_non_filtered_period -= diff; 240 241 return he->stat.period == 0; 242 } 243 244 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) 245 { 246 struct rb_node *next = rb_first(&hists->entries); 247 struct hist_entry *n; 248 249 while (next) { 250 n = rb_entry(next, struct hist_entry, rb_node); 251 next = rb_next(&n->rb_node); 252 /* 253 * We may be annotating this, for instance, so keep it here in 254 * case some it gets new samples, we'll eventually free it when 255 * the user stops browsing and it agains gets fully decayed. 
256 */ 257 if (((zap_user && n->level == '.') || 258 (zap_kernel && n->level != '.') || 259 hists__decay_entry(hists, n)) && 260 !n->used) { 261 rb_erase(&n->rb_node, &hists->entries); 262 263 if (sort__need_collapse) 264 rb_erase(&n->rb_node_in, &hists->entries_collapsed); 265 266 --hists->nr_entries; 267 if (!n->filtered) 268 --hists->nr_non_filtered_entries; 269 270 hist_entry__free(n); 271 } 272 } 273 } 274 275 /* 276 * histogram, sorted on item, collects periods 277 */ 278 279 static struct hist_entry *hist_entry__new(struct hist_entry *template) 280 { 281 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0; 282 struct hist_entry *he = zalloc(sizeof(*he) + callchain_size); 283 284 if (he != NULL) { 285 *he = *template; 286 287 if (he->ms.map) 288 he->ms.map->referenced = true; 289 290 if (he->branch_info) { 291 /* 292 * This branch info is (a part of) allocated from 293 * sample__resolve_bstack() and will be freed after 294 * adding new entries. So we need to save a copy. 
295 */ 296 he->branch_info = malloc(sizeof(*he->branch_info)); 297 if (he->branch_info == NULL) { 298 free(he); 299 return NULL; 300 } 301 302 memcpy(he->branch_info, template->branch_info, 303 sizeof(*he->branch_info)); 304 305 if (he->branch_info->from.map) 306 he->branch_info->from.map->referenced = true; 307 if (he->branch_info->to.map) 308 he->branch_info->to.map->referenced = true; 309 } 310 311 if (he->mem_info) { 312 if (he->mem_info->iaddr.map) 313 he->mem_info->iaddr.map->referenced = true; 314 if (he->mem_info->daddr.map) 315 he->mem_info->daddr.map->referenced = true; 316 } 317 318 if (symbol_conf.use_callchain) 319 callchain_init(he->callchain); 320 321 INIT_LIST_HEAD(&he->pairs.node); 322 } 323 324 return he; 325 } 326 327 static u8 symbol__parent_filter(const struct symbol *parent) 328 { 329 if (symbol_conf.exclude_other && parent == NULL) 330 return 1 << HIST_FILTER__PARENT; 331 return 0; 332 } 333 334 static struct hist_entry *add_hist_entry(struct hists *hists, 335 struct hist_entry *entry, 336 struct addr_location *al) 337 { 338 struct rb_node **p; 339 struct rb_node *parent = NULL; 340 struct hist_entry *he; 341 int64_t cmp; 342 u64 period = entry->stat.period; 343 u64 weight = entry->stat.weight; 344 345 p = &hists->entries_in->rb_node; 346 347 while (*p != NULL) { 348 parent = *p; 349 he = rb_entry(parent, struct hist_entry, rb_node_in); 350 351 /* 352 * Make sure that it receives arguments in a same order as 353 * hist_entry__collapse() so that we can use an appropriate 354 * function when searching an entry regardless which sort 355 * keys were used. 356 */ 357 cmp = hist_entry__cmp(he, entry); 358 359 if (!cmp) { 360 he_stat__add_period(&he->stat, period, weight); 361 362 /* 363 * This mem info was allocated from sample__resolve_mem 364 * and will not be used anymore. 365 */ 366 zfree(&entry->mem_info); 367 368 /* If the map of an existing hist_entry has 369 * become out-of-date due to an exec() or 370 * similar, update it. 
Otherwise we will 371 * mis-adjust symbol addresses when computing 372 * the history counter to increment. 373 */ 374 if (he->ms.map != entry->ms.map) { 375 he->ms.map = entry->ms.map; 376 if (he->ms.map) 377 he->ms.map->referenced = true; 378 } 379 goto out; 380 } 381 382 if (cmp < 0) 383 p = &(*p)->rb_left; 384 else 385 p = &(*p)->rb_right; 386 } 387 388 he = hist_entry__new(entry); 389 if (!he) 390 return NULL; 391 392 rb_link_node(&he->rb_node_in, parent, p); 393 rb_insert_color(&he->rb_node_in, hists->entries_in); 394 out: 395 he_stat__add_cpumode_period(&he->stat, al->cpumode, period); 396 return he; 397 } 398 399 struct hist_entry *__hists__add_entry(struct hists *hists, 400 struct addr_location *al, 401 struct symbol *sym_parent, 402 struct branch_info *bi, 403 struct mem_info *mi, 404 u64 period, u64 weight, u64 transaction) 405 { 406 struct hist_entry entry = { 407 .thread = al->thread, 408 .comm = thread__comm(al->thread), 409 .ms = { 410 .map = al->map, 411 .sym = al->sym, 412 }, 413 .cpu = al->cpu, 414 .ip = al->addr, 415 .level = al->level, 416 .stat = { 417 .nr_events = 1, 418 .period = period, 419 .weight = weight, 420 }, 421 .parent = sym_parent, 422 .filtered = symbol__parent_filter(sym_parent) | al->filtered, 423 .hists = hists, 424 .branch_info = bi, 425 .mem_info = mi, 426 .transaction = transaction, 427 }; 428 429 return add_hist_entry(hists, &entry, al); 430 } 431 432 int64_t 433 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) 434 { 435 struct sort_entry *se; 436 int64_t cmp = 0; 437 438 list_for_each_entry(se, &hist_entry__sort_list, list) { 439 cmp = se->se_cmp(left, right); 440 if (cmp) 441 break; 442 } 443 444 return cmp; 445 } 446 447 int64_t 448 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) 449 { 450 struct sort_entry *se; 451 int64_t cmp = 0; 452 453 list_for_each_entry(se, &hist_entry__sort_list, list) { 454 int64_t (*f)(struct hist_entry *, struct hist_entry *); 455 456 f = se->se_collapse 
?: se->se_cmp; 457 458 cmp = f(left, right); 459 if (cmp) 460 break; 461 } 462 463 return cmp; 464 } 465 466 void hist_entry__free(struct hist_entry *he) 467 { 468 zfree(&he->branch_info); 469 zfree(&he->mem_info); 470 free_srcline(he->srcline); 471 free(he); 472 } 473 474 /* 475 * collapse the histogram 476 */ 477 478 static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, 479 struct rb_root *root, 480 struct hist_entry *he) 481 { 482 struct rb_node **p = &root->rb_node; 483 struct rb_node *parent = NULL; 484 struct hist_entry *iter; 485 int64_t cmp; 486 487 while (*p != NULL) { 488 parent = *p; 489 iter = rb_entry(parent, struct hist_entry, rb_node_in); 490 491 cmp = hist_entry__collapse(iter, he); 492 493 if (!cmp) { 494 he_stat__add_stat(&iter->stat, &he->stat); 495 496 if (symbol_conf.use_callchain) { 497 callchain_cursor_reset(&callchain_cursor); 498 callchain_merge(&callchain_cursor, 499 iter->callchain, 500 he->callchain); 501 } 502 hist_entry__free(he); 503 return false; 504 } 505 506 if (cmp < 0) 507 p = &(*p)->rb_left; 508 else 509 p = &(*p)->rb_right; 510 } 511 512 rb_link_node(&he->rb_node_in, parent, p); 513 rb_insert_color(&he->rb_node_in, root); 514 return true; 515 } 516 517 static struct rb_root *hists__get_rotate_entries_in(struct hists *hists) 518 { 519 struct rb_root *root; 520 521 pthread_mutex_lock(&hists->lock); 522 523 root = hists->entries_in; 524 if (++hists->entries_in > &hists->entries_in_array[1]) 525 hists->entries_in = &hists->entries_in_array[0]; 526 527 pthread_mutex_unlock(&hists->lock); 528 529 return root; 530 } 531 532 static void hists__apply_filters(struct hists *hists, struct hist_entry *he) 533 { 534 hists__filter_entry_by_dso(hists, he); 535 hists__filter_entry_by_thread(hists, he); 536 hists__filter_entry_by_symbol(hists, he); 537 } 538 539 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog) 540 { 541 struct rb_root *root; 542 struct rb_node *next; 543 struct hist_entry *n; 544 
545 if (!sort__need_collapse) 546 return; 547 548 root = hists__get_rotate_entries_in(hists); 549 next = rb_first(root); 550 551 while (next) { 552 if (session_done()) 553 break; 554 n = rb_entry(next, struct hist_entry, rb_node_in); 555 next = rb_next(&n->rb_node_in); 556 557 rb_erase(&n->rb_node_in, root); 558 if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) { 559 /* 560 * If it wasn't combined with one of the entries already 561 * collapsed, we need to apply the filters that may have 562 * been set by, say, the hist_browser. 563 */ 564 hists__apply_filters(hists, n); 565 } 566 if (prog) 567 ui_progress__update(prog, 1); 568 } 569 } 570 571 /* 572 * reverse the map, sort on period. 573 */ 574 575 static int period_cmp(u64 period_a, u64 period_b) 576 { 577 if (period_a > period_b) 578 return 1; 579 if (period_a < period_b) 580 return -1; 581 return 0; 582 } 583 584 static int hist_entry__sort_on_period(struct hist_entry *a, 585 struct hist_entry *b) 586 { 587 int ret; 588 int i, nr_members; 589 struct perf_evsel *evsel; 590 struct hist_entry *pair; 591 u64 *periods_a, *periods_b; 592 593 ret = period_cmp(a->stat.period, b->stat.period); 594 if (ret || !symbol_conf.event_group) 595 return ret; 596 597 evsel = hists_to_evsel(a->hists); 598 nr_members = evsel->nr_members; 599 if (nr_members <= 1) 600 return ret; 601 602 periods_a = zalloc(sizeof(periods_a) * nr_members); 603 periods_b = zalloc(sizeof(periods_b) * nr_members); 604 605 if (!periods_a || !periods_b) 606 goto out; 607 608 list_for_each_entry(pair, &a->pairs.head, pairs.node) { 609 evsel = hists_to_evsel(pair->hists); 610 periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period; 611 } 612 613 list_for_each_entry(pair, &b->pairs.head, pairs.node) { 614 evsel = hists_to_evsel(pair->hists); 615 periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period; 616 } 617 618 for (i = 1; i < nr_members; i++) { 619 ret = period_cmp(periods_a[i], periods_b[i]); 620 if (ret) 621 break; 622 } 
623 624 out: 625 free(periods_a); 626 free(periods_b); 627 628 return ret; 629 } 630 631 static void hists__reset_filter_stats(struct hists *hists) 632 { 633 hists->nr_non_filtered_entries = 0; 634 hists->stats.total_non_filtered_period = 0; 635 } 636 637 void hists__reset_stats(struct hists *hists) 638 { 639 hists->nr_entries = 0; 640 hists->stats.total_period = 0; 641 642 hists__reset_filter_stats(hists); 643 } 644 645 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h) 646 { 647 hists->nr_non_filtered_entries++; 648 hists->stats.total_non_filtered_period += h->stat.period; 649 } 650 651 void hists__inc_stats(struct hists *hists, struct hist_entry *h) 652 { 653 if (!h->filtered) 654 hists__inc_filter_stats(hists, h); 655 656 hists->nr_entries++; 657 hists->stats.total_period += h->stat.period; 658 } 659 660 static void __hists__insert_output_entry(struct rb_root *entries, 661 struct hist_entry *he, 662 u64 min_callchain_hits) 663 { 664 struct rb_node **p = &entries->rb_node; 665 struct rb_node *parent = NULL; 666 struct hist_entry *iter; 667 668 if (symbol_conf.use_callchain) 669 callchain_param.sort(&he->sorted_chain, he->callchain, 670 min_callchain_hits, &callchain_param); 671 672 while (*p != NULL) { 673 parent = *p; 674 iter = rb_entry(parent, struct hist_entry, rb_node); 675 676 if (hist_entry__sort_on_period(he, iter) > 0) 677 p = &(*p)->rb_left; 678 else 679 p = &(*p)->rb_right; 680 } 681 682 rb_link_node(&he->rb_node, parent, p); 683 rb_insert_color(&he->rb_node, entries); 684 } 685 686 void hists__output_resort(struct hists *hists) 687 { 688 struct rb_root *root; 689 struct rb_node *next; 690 struct hist_entry *n; 691 u64 min_callchain_hits; 692 693 min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100); 694 695 if (sort__need_collapse) 696 root = &hists->entries_collapsed; 697 else 698 root = hists->entries_in; 699 700 next = rb_first(root); 701 hists->entries = RB_ROOT; 702 703 
hists__reset_stats(hists); 704 hists__reset_col_len(hists); 705 706 while (next) { 707 n = rb_entry(next, struct hist_entry, rb_node_in); 708 next = rb_next(&n->rb_node_in); 709 710 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits); 711 hists__inc_stats(hists, n); 712 713 if (!n->filtered) 714 hists__calc_col_len(hists, n); 715 } 716 } 717 718 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, 719 enum hist_filter filter) 720 { 721 h->filtered &= ~(1 << filter); 722 if (h->filtered) 723 return; 724 725 /* force fold unfiltered entry for simplicity */ 726 h->ms.unfolded = false; 727 h->row_offset = 0; 728 729 hists->stats.nr_non_filtered_samples += h->stat.nr_events; 730 731 hists__inc_filter_stats(hists, h); 732 hists__calc_col_len(hists, h); 733 } 734 735 736 static bool hists__filter_entry_by_dso(struct hists *hists, 737 struct hist_entry *he) 738 { 739 if (hists->dso_filter != NULL && 740 (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { 741 he->filtered |= (1 << HIST_FILTER__DSO); 742 return true; 743 } 744 745 return false; 746 } 747 748 void hists__filter_by_dso(struct hists *hists) 749 { 750 struct rb_node *nd; 751 752 hists->stats.nr_non_filtered_samples = 0; 753 754 hists__reset_filter_stats(hists); 755 hists__reset_col_len(hists); 756 757 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 758 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 759 760 if (symbol_conf.exclude_other && !h->parent) 761 continue; 762 763 if (hists__filter_entry_by_dso(hists, h)) 764 continue; 765 766 hists__remove_entry_filter(hists, h, HIST_FILTER__DSO); 767 } 768 } 769 770 static bool hists__filter_entry_by_thread(struct hists *hists, 771 struct hist_entry *he) 772 { 773 if (hists->thread_filter != NULL && 774 he->thread != hists->thread_filter) { 775 he->filtered |= (1 << HIST_FILTER__THREAD); 776 return true; 777 } 778 779 return false; 780 } 781 782 void hists__filter_by_thread(struct 
hists *hists) 783 { 784 struct rb_node *nd; 785 786 hists->stats.nr_non_filtered_samples = 0; 787 788 hists__reset_filter_stats(hists); 789 hists__reset_col_len(hists); 790 791 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 792 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 793 794 if (hists__filter_entry_by_thread(hists, h)) 795 continue; 796 797 hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD); 798 } 799 } 800 801 static bool hists__filter_entry_by_symbol(struct hists *hists, 802 struct hist_entry *he) 803 { 804 if (hists->symbol_filter_str != NULL && 805 (!he->ms.sym || strstr(he->ms.sym->name, 806 hists->symbol_filter_str) == NULL)) { 807 he->filtered |= (1 << HIST_FILTER__SYMBOL); 808 return true; 809 } 810 811 return false; 812 } 813 814 void hists__filter_by_symbol(struct hists *hists) 815 { 816 struct rb_node *nd; 817 818 hists->stats.nr_non_filtered_samples = 0; 819 820 hists__reset_filter_stats(hists); 821 hists__reset_col_len(hists); 822 823 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 824 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 825 826 if (hists__filter_entry_by_symbol(hists, h)) 827 continue; 828 829 hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL); 830 } 831 } 832 833 void events_stats__inc(struct events_stats *stats, u32 type) 834 { 835 ++stats->nr_events[0]; 836 ++stats->nr_events[type]; 837 } 838 839 void hists__inc_nr_events(struct hists *hists, u32 type) 840 { 841 events_stats__inc(&hists->stats, type); 842 } 843 844 static struct hist_entry *hists__add_dummy_entry(struct hists *hists, 845 struct hist_entry *pair) 846 { 847 struct rb_root *root; 848 struct rb_node **p; 849 struct rb_node *parent = NULL; 850 struct hist_entry *he; 851 int64_t cmp; 852 853 if (sort__need_collapse) 854 root = &hists->entries_collapsed; 855 else 856 root = hists->entries_in; 857 858 p = &root->rb_node; 859 860 while (*p != NULL) { 861 parent = *p; 862 he = rb_entry(parent, 
struct hist_entry, rb_node_in); 863 864 cmp = hist_entry__collapse(he, pair); 865 866 if (!cmp) 867 goto out; 868 869 if (cmp < 0) 870 p = &(*p)->rb_left; 871 else 872 p = &(*p)->rb_right; 873 } 874 875 he = hist_entry__new(pair); 876 if (he) { 877 memset(&he->stat, 0, sizeof(he->stat)); 878 he->hists = hists; 879 rb_link_node(&he->rb_node_in, parent, p); 880 rb_insert_color(&he->rb_node_in, root); 881 hists__inc_stats(hists, he); 882 he->dummy = true; 883 } 884 out: 885 return he; 886 } 887 888 static struct hist_entry *hists__find_entry(struct hists *hists, 889 struct hist_entry *he) 890 { 891 struct rb_node *n; 892 893 if (sort__need_collapse) 894 n = hists->entries_collapsed.rb_node; 895 else 896 n = hists->entries_in->rb_node; 897 898 while (n) { 899 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); 900 int64_t cmp = hist_entry__collapse(iter, he); 901 902 if (cmp < 0) 903 n = n->rb_left; 904 else if (cmp > 0) 905 n = n->rb_right; 906 else 907 return iter; 908 } 909 910 return NULL; 911 } 912 913 /* 914 * Look for pairs to link to the leader buckets (hist_entries): 915 */ 916 void hists__match(struct hists *leader, struct hists *other) 917 { 918 struct rb_root *root; 919 struct rb_node *nd; 920 struct hist_entry *pos, *pair; 921 922 if (sort__need_collapse) 923 root = &leader->entries_collapsed; 924 else 925 root = leader->entries_in; 926 927 for (nd = rb_first(root); nd; nd = rb_next(nd)) { 928 pos = rb_entry(nd, struct hist_entry, rb_node_in); 929 pair = hists__find_entry(other, pos); 930 931 if (pair) 932 hist_entry__add_pair(pair, pos); 933 } 934 } 935 936 /* 937 * Look for entries in the other hists that are not present in the leader, if 938 * we find them, just add a dummy entry on the leader hists, with period=0, 939 * nr_events=0, to serve as the list header. 
940 */ 941 int hists__link(struct hists *leader, struct hists *other) 942 { 943 struct rb_root *root; 944 struct rb_node *nd; 945 struct hist_entry *pos, *pair; 946 947 if (sort__need_collapse) 948 root = &other->entries_collapsed; 949 else 950 root = other->entries_in; 951 952 for (nd = rb_first(root); nd; nd = rb_next(nd)) { 953 pos = rb_entry(nd, struct hist_entry, rb_node_in); 954 955 if (!hist_entry__has_pairs(pos)) { 956 pair = hists__add_dummy_entry(leader, pos); 957 if (pair == NULL) 958 return -1; 959 hist_entry__add_pair(pos, pair); 960 } 961 } 962 963 return 0; 964 } 965 966 u64 hists__total_period(struct hists *hists) 967 { 968 return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period : 969 hists->stats.total_period; 970 } 971 972 int parse_filter_percentage(const struct option *opt __maybe_unused, 973 const char *arg, int unset __maybe_unused) 974 { 975 if (!strcmp(arg, "relative")) 976 symbol_conf.filter_relative = true; 977 else if (!strcmp(arg, "absolute")) 978 symbol_conf.filter_relative = false; 979 else 980 return -1; 981 982 return 0; 983 } 984 985 int perf_hist_config(const char *var, const char *value) 986 { 987 if (!strcmp(var, "hist.percentage")) 988 return parse_filter_percentage(NULL, value, 0); 989 990 return 0; 991 } 992