// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{

	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);
		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}
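/*
 * Worked example (illustration only): he_stat__decay() above applies a
 * fixed 7/8 factor per pass, so with integer arithmetic a period of 1000
 * becomes 875, then 765, then 669, ...  hists__decay_entries() relies on
 * this together with hists__delete_entry() to age out entries whose
 * period eventually reaches zero.
 */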
/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL) {
			map__zput(he->ms.map);
			free(he->stat_acc);
			return -ENOMEM;
		}

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);

		if (he->raw_data == NULL) {
			map__put(he->ms.map);
			if (he->branch_info) {
				map__put(he->branch_info->from.map);
				map__put(he->branch_info->to.map);
				free(he->branch_info);
			}
			if (he->mem_info) {
				map__put(he->mem_info->iaddr.map);
				map__put(he->mem_info->daddr.map);
			}
			free(he->stat_acc);
			return -ENOMEM;
		}
	}
	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new = hist_entry__zalloc,
	.free = hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
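/*
 * Sketch (hypothetical wrapper, for illustration): hist_entry_ops lets a
 * tool co-allocate private state with each entry, along the lines of:
 *
 *	struct my_entry {
 *		struct my_stats   stats;
 *		struct hist_entry he;	- must be last: the callchain area
 *	};				  is allocated right behind it
 *
 *	static void *my_zalloc(size_t size)
 *	{
 *		struct my_entry *me = zalloc(size + sizeof(*me));
 *
 *		return me ? &me->he : NULL;
 *	}
 *
 * paired with a matching ->free() and passed via hists__add_entry_ops()
 * below.
 */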
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = al->srcline ? strdup(al->srcline) : NULL,
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, ops);
}
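/*
 * Note (summary, not from the original source): both wrappers above funnel
 * into __hists__add_entry(); the _ops variant only differs in passing a
 * custom struct hist_entry_ops, which hist_entry__new() replaces with
 * default_ops when it is NULL.
 */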
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight and this is indirectly achieved by
	 * passing period=weight here and via the
	 * he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
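/*
 * Illustration for the cumulative ("--children" style) iterator below:
 * the he_cache array remembers which entries were already touched for the
 * current sample, so a callchain like main -> f -> g -> f accumulates the
 * period into the entry for f only once instead of pushing it past 100%.
 */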
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = al->srcline ? strdup(al->srcline) : NULL,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}
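/*
 * Typical usage of hist_entry_iter__add() (sketch only; the surrounding
 * sample-processing context is hypothetical, field names are the ones
 * used in this file):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel = evsel,
 *		.sample = sample,
 *		.ops = &hist_iter_normal,	- or _branch/_mem/_cumulative
 *		.hide_unresolved = false,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, NULL);
 *
 * where 'al' is the resolved addr_location of the sample and max_stack is
 * the callchain depth limit.  An optional iter.add_entry_cb is invoked
 * for every entry that gets added.
 */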
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
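/*
 * Note: the three comparison stages serve different trees: ->cmp()
 * (hist_entry__cmp above) orders entries on input, ->collapse()
 * (hist_entry__collapse above) decides which entries get merged during
 * collapsing, and ->sort() (hist_entry__sort below) orders the final
 * output tree.
 */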
void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		mem_info__zput(he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}
static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
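/*
 * Return convention used below: hists__hierarchy_insert_entry() above and
 * the merge path of hists__collapse_insert_entry() consume 'he' (it is
 * freed via hist_entry__delete()) and return 0 or -1, while a newly
 * linked entry makes hists__collapse_insert_entry() return 1 so the
 * caller knows it still has to apply filters to it.
 */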
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}

struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}
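/*
 * Note on hists__get_rotate_entries_in() above: hists keeps two input
 * trees in entries_in_array[] and flips hists->entries_in between them
 * under hists->lock, so new samples can keep landing in one tree while
 * the other is being drained by hists__collapse_resort() below.
 */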
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}
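/*
 * Worked example (illustration only): with callchain_param.min_percent of
 * 0.5 and a total callchain period of 200000, output_resort() below
 * computes min_callchain_hits = 1000 and callchain_param.sort() prunes
 * chains that accumulated fewer hits; in CHAIN_GRAPH_REL mode the
 * threshold is recomputed per entry from its own (or accumulated) period.
 */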
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
				  hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}
struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}


static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
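/*
 * Usage sketch (hypothetical caller, e.g. a browser setting a filter):
 *
 *	hists->dso_filter = dso;	- pick the DSO to keep
 *	hists__filter_by_dso(hists);	- re-evaluates every entry
 *
 * Each hists__filter_entry_by_*() above returns true when the entry must
 * be filtered out and sets the corresponding HIST_FILTER__* bit;
 * hists__remove_entry_filter() clears it again for surviving entries.
 */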
static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since a filter in a lower
	 * hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}
void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
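/*
 * Note: hists__add_dummy_entry() above and add_dummy_hierarchy_entry()
 * below create zero-period placeholders so that hists__link() can pair
 * every entry in one hists with a counterpart in the leader, which is
 * what diff-style comparisons of two sets of hists rely on.
 */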

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* link to the leader-side parent, not to pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, just add a dummy entry on the leader hists, with period=0 and
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
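
/*
 * Illustrative pairing flow (a sketch; not called from this file): a tool
 * that compares two sets of hists -- perf diff is a typical user -- first
 * matches buckets present on both sides, then links dummy entries for the
 * ones only the "other" side has, so iterating the leader covers every
 * bucket:
 *
 *	hists__match(leader, other);		// pair entries found in both
 *	if (hists__link(leader, other) < 0)	// add zeroed dummies for the rest
 *		return -1;			// hypothetical caller's error path
 */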

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
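
/*
 * Builds the title line shown by the report/top style browsers. As an
 * illustration (all values made up), the result looks roughly like:
 *
 *	Samples: 163K of event 'cycles:ppp', 4000 Hz, Event count (approx.): 152837294
 *
 * with ", UID: ...", ", Thread: ...", ", DSO: ..." and ", Processor Socket: ..."
 * appended only while the corresponding filter is active.
 */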
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	const struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = perf_evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct perf_evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += snprintf(bf + printed, size - printed,
				    ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread->comm_set ? thread__comm_str(thread) : ""),
					     thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}