// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

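/*
 * Grow the per-column maximum widths so that everything in 'h' (symbol,
 * dso, comm, branch/mem info, srcline, etc.) will fit when displayed.
 */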
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

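/*
 * Age an entry's stats: each decay pass keeps 7/8 of the period and event
 * count, so entries that stop getting samples fade out exponentially
 * (used by the continuously updating 'perf top' style displays).
 */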
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

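/*
 * Decay all top-level entries, deleting the ones whose period dropped to
 * zero.  zap_user/zap_kernel force-remove user space entries (level '.')
 * or kernel entries (any other level) regardless of the remaining period.
 */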
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.map);
		map__put(he->branch_info->to.map);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.map);
		map__put(he->mem_info->daddr.map);
	}
err:
	map__zput(he->ms.map);
	zfree(&he->stat_acc);
	return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

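/*
 * Allocate a new hist_entry initialized from 'template'.  The allocation
 * is sizeof(struct hist_entry) plus room for the callchain root, which
 * lives in the flexible array member at the end of the entry when
 * callchains are in use.
 */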
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

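/*
 * Pick a random number in [0, high).  A plain 'random() % high' would be
 * biased towards small values whenever the generator's range is not a
 * multiple of 'high', so values below 'thresh' (the range modulo 'high')
 * are rejected and the draw is retried.
 */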
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;

	for (;;) {
		unsigned r = random();

		if (r >= thresh)
			return r % high;
	}
}

static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}

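/*
 * Fill a template hist_entry on the stack from the resolved sample and
 * either merge it into an existing entry with the same sort keys or
 * create a new one (see hists__findnew_entry()).
 */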
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

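/*
 * Memory access (mem-load/mem-store) iterator: resolve the data access
 * once in prepare, then add a single entry whose period is the access
 * cost, i.e. the sample weight.
 */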
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want the sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and in he_stat__add_period().
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

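/*
 * Branch stack iterator: emit one entry per recorded branch.  Each entry
 * gets a pseudo period of 1 so the report shows the share of branches
 * rather than of sampled events, and the weight is the branch's cycle
 * count when the hardware provides one.
 */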
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

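/*
 * Cumulative ('--children') iterator: besides the entry for the sample
 * itself, add one accumulated entry per caller in the callchain.  The
 * he_cache array remembers which entries were already touched for this
 * sample so recursion and cycles are only accumulated once.
 */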
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

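/*
 * Drive one sample through an iterator: resolve the callchain, call
 * prepare_entry once and add_single_entry for the sample itself, then
 * call add_next_entry for every extra entry that next_entry produces
 * (callchain nodes, branch stack slots, ...), invoking add_entry_cb
 * after each addition, before finish_entry cleans up.
 */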
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}

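/*
 * Three comparator stages are used on hist entries: ->cmp when inserting
 * into the input tree, ->collapse when merging equal entries during
 * hists__collapse_resort(), and ->sort when building the output tree.
 */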
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column; otherwise don't bother adding
 * spaces, because that would break viewing this with, for instance, 'less':
 * it would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

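/*
 * In hierarchy mode each level only carries the sort keys of its own
 * format list, so a thread/dso/symbol filter can only be decided at the
 * level that actually shows that column.  This helper clears the filter
 * marker on the parents of an unfiltered matching entry and lets
 * non-matching levels inherit their parent's marker.
 */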
static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by its own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}

static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}

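/*
 * hists->entries_in_array holds two input trees: samples are added to one
 * while the other is being collapsed.  This rotates which tree is current
 * under hists->lock and returns the tree to collapse.
 */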
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

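/*
 * Merge all entries with equal collapse keys from the current input tree
 * into hists->entries_collapsed (or into the hierarchy trees), applying
 * the active filters to entries that were inserted rather than merged.
 */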
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries
			 * already collapsed, we need to apply the filters
			 * that may have been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * Recalculate the total period using top-level entries only,
	 * since lower level entries only see non-filtered entries while
	 * upper level entries carry the sum of both.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

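/*
 * Insert 'he' into the hierarchy output tree ordered by the output sort
 * keys, giving dynamic-entry formats a chance to update their column
 * width.
 */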
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
				  hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

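/*
 * Helpers to walk the hierarchy output trees in display order.  The
 * hierarchy_move_dir argument controls whether to descend into children:
 * always (HMD_FORCE_CHILD, unless the entry is a leaf), never
 * (HMD_FORCE_SIBLING), or only when the entry is unfolded (HMD_NORMAL).
 */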
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

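/*
 * Clear one filter bit on an entry, re-adding its period to the parent
 * stats in hierarchy mode, and re-account it as non-filtered once no
 * filter bits remain.
 */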
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

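/*
 * Re-insert 'he' into 'root' ordered by the output sort keys, recursing
 * into its children so the whole subtree ends up re-sorted after a
 * filter changed the periods.
 */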
static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * Resort the output after applying a new filter, since a filter in
	 * a lower hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

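/*
 * Add a zero-period placeholder entry matching 'pair', so that when two
 * hists are linked (e.g. by 'perf diff') entries that exist only on one
 * side still have a counterpart on the other.
 */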
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since a filter at a
	 * lower hierarchy level can change periods at an upper level.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
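/*
 * Editor's note: dummy entries are inserted with zeroed stats so that the
 * pairing code (hists__match()/hists__link() below) has a leader-side row
 * even for entries that exist only in the other hists; see the comment
 * above hists__link() for the rationale.
 */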
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
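/*
 * Editor's note: an illustrative pairing sequence, roughly what a
 * multi-session consumer such as "perf diff" does (a sketch, not a
 * verbatim excerpt from that tool):
 *
 *	hists__match(leader, other);		// link entries both have
 *	if (hists__link(leader, other) < 0)	// backfill missing rows
 *		return -1;
 */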
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* point to the leader-side parent, not to pos's parent */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find any, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}
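/*
 * Editor's note: branch stacks that carry cycle counts (e.g. Intel LBRs
 * on Skylake and later, an assumption about the usual source of this
 * data) let annotation derive per-block cycles and IPC.  Note that the
 * entries are stored newest-first, hence the reverse walk below.
 */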
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles, always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
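/*
 * Editor's note: builds the header line shown at the top of the report,
 * e.g. (the sample values are illustrative only):
 *
 *	Samples: 40K of event 'cycles', Event count (approx.): 34086622399
 *
 * optionally followed by the active UID/thread/DSO/socket filters.
 */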
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = perf_evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				     ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread->comm_set ? thread__comm_str(thread) : ""),
					     thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}