// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/time64.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

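/*
 * Update the maximum length seen for each column this entry contributes
 * to.  Illustrative arithmetic (not in the original source): on a 64-bit
 * build with verbose output, a resolved symbol named "main" (namelen 4)
 * needs 4 + 4 ('[x] ') + 16 (raw address) + 2 ("0x") + 3 (" y " origin)
 * = 29 columns.
 */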
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_TIME, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

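/*
 * Split an entry's period into per-privilege-level buckets according to
 * the sample's cpumode (PERF_RECORD_MISC_{KERNEL,USER,GUEST_*}); these
 * buckets back the sys/us/guest breakdown in the output.  Unknown
 * cpumodes are deliberately ignored.
 */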
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

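/*
 * Illustrative note on the decay above: callers such as 'perf top' run
 * hists__decay_entries() between refreshes, so an entry that stops
 * receiving samples shrinks geometrically:
 *
 *	period_n = period_0 * (7/8)^n	(so ~0.45 * period_0 after 6 rounds)
 *
 * and is deleted once its period reaches zero.
 */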
void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	free(he->srcline);

err_rawdata:
	free(he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.map);
		map__put(he->branch_info->to.map);
		free(he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.map);
		map__put(he->mem_info->daddr.map);
	}
err:
	map__zput(he->ms.map);
	free(he->stat_acc);
	return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new = hist_entry__zalloc,
	.free = hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

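/*
 * The ops indirection above lets a tool co-allocate private data with
 * each hist_entry by supplying its own ->new()/->free() pair (e.g.
 * 'perf c2c' does this); default_ops simply zallocs the entry plus
 * callchain_size bytes for the callchain_root that trails it.
 */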
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;

	for (;;) {
		unsigned r = random();

		if (r >= thresh)
			return r % high;
	}
}

static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}

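/*
 * Notes on the two helpers above (illustrative, not from the original
 * source):
 *
 * - random_max(high) computes thresh = -high % high, which in unsigned
 *   arithmetic is 2^32 mod high, and rejects raw random() values below
 *   it so that 'r % high' does not suffer from modulo bias.
 *
 * - hists__res_sample() keeps up to symbol_conf.res_sample
 *   representative samples per entry: the first res_sample samples fill
 *   the array, after which a random slot is overwritten, which favors
 *   recent samples over a true uniform reservoir.
 */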
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, ops);
}

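/*
 * The iter_*() callbacks below implement the protocol driven by
 * hist_entry_iter__add():
 *
 *	prepare_entry()			resolve sample-type specific data
 *	add_single_entry()		add the primary hist entry
 *	while (next_entry())
 *		add_next_entry()	add per-branch/callchain entries
 *	finish_entry()			append callchains, free temporaries
 *
 * The nop variants serve iterators that only produce a single entry.
 */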
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight, and this is achieved indirectly by
	 * passing period=weight here and in he_stat__add_period().
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * accumulated only once, to prevent entries from exceeding 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

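/*
 * Illustrative example for the he_cache above: given a callchain like
 *
 *	main -> f -> g -> f -> g	(mutual recursion)
 *
 * cumulative mode would otherwise account 'f' and 'g' twice each for
 * this one sample; caching the entries already added lets
 * iter_add_next_cumulative_entry() skip the duplicates.
 */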
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling the callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};

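/*
 * Driver for the hist_iter_ops above.  Note the extra map reference
 * ('alm'): it keeps al->map alive across the callbacks, which might
 * otherwise drop the last reference to it while 'al' is still in use.
 */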
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

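/*
 * Note the division of labor among the format callbacks: fmt->cmp
 * (hist_entry__cmp) orders entries in the input tree, fmt->collapse
 * (hist_entry__collapse) decides which entries merge while collapsing,
 * and fmt->sort orders the final output tree (see hist_entry__sort()
 * further below).
 */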
void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		mem_info__zput(he->mem_info);
	}

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces, because that would break viewing this with, for instance, 'less':
 * it would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by its own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate the
		 * filter marker to parents.  The marker bit was already set
		 * by default, so it only needs to clear non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set the
		 * filter marker for upper level entries.  It will be cleared
		 * if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's filter
		 * bit so that lower level entries of a non-filtered entry
		 * won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}

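/*
 * Returns 1 if 'he' was linked into the collapsed tree as a new entry,
 * 0 if it was merged into an existing entry (and freed), or -1 if
 * merging its callchain failed.  hists__collapse_resort() applies
 * filters only for the ret > 0 case, since a merged 'he' is gone.
 */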
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}

struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

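/*
 * hists->entries_in_array[2] acts as a double buffer: new samples keep
 * flowing into the current 'entries_in' tree while collapse/resort
 * drains the tree handed out here, so continuous-update users such as
 * 'perf top' need not stop the producer.
 */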
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate the total period using top-level entries only,
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have the sum of both.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}

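/*
 * Illustrative arithmetic for the threshold below: with
 * callchain_param.min_percent = 0.5 and a total callchain period of
 * 20000, min_callchain_hits = 20000 * 0.5 / 100 = 100, i.e. chains
 * accounting for less than 0.5% of the total are pruned while sorting.
 */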
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
				  hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

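/*
 * Hierarchy traversal helpers: HMD_FORCE_CHILD descends into children
 * even when the entry is folded, HMD_FORCE_SIBLING never descends, and
 * HMD_NORMAL honors the entry's unfolded state (see can_goto_child()
 * above).
 */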
struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

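/*
 * For the flat (non-hierarchy) case, hists__filter_by_type() below
 * re-evaluates one of the predicates above for every entry in the
 * output tree and re-accounts non-filtered stats and column widths for
 * the survivors.
 */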
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since a filter in a lower
	 * hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		/* stat_acc is a separately allocated he_stat, same size as he->stat */
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
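/*
 * Editor's note (illustrative): a "dummy" entry carries a zeroed
 * he_stat and exists only so that every entry in one hists has a
 * counterpart in another -- e.g. when comparing two sessions and the
 * leader never sampled a symbol that the other session did.  The
 * hierarchy variant below does the same thing one level at a time,
 * comparing with the per-level sort keys instead of the flat ones.
 */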
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
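/*
 * Usage sketch (editor's illustration, in the style of 'perf diff'):
 * with a baseline hists acting as leader and a second session's hists
 * as other, pairing is a two-step process -- first match the entries
 * both sides have, then add dummies for the rest:
 *
 *	hists__match(leader, other);
 *	if (hists__link(leader, other) < 0)
 *		return -1;
 */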
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* the parent must be a leader-side entry, not pos's parent */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}
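/*
 * Editor's note (hypothetical example): if the program took branches
 * b0, then b1, then b2, the branch stack arrives newest-first, i.e.
 * entries[0] = b2, entries[1] = b1, entries[2] = b0.  That is why the
 * loop above walks from bs->nr - 1 down to 0, so that 'prev' always
 * holds the target of the branch executed just before the current one.
 */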
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
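/*
 * Worked example (editor's illustration, hypothetical numbers): with
 * total_period = 1000 and total_non_filtered_period = 250 after a
 * thread filter, an entry whose period is 100 shows up as
 * 100 / 250 = 40% in "relative" mode, but as 100 / 1000 = 10% in
 * "absolute" mode; see parse_filter_percentage() below.
 */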
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	const struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = perf_evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct perf_evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	/* attr.sample_freq is a u64, so use PRIu64 rather than %d */
	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str),
			  " %" PRIu64 " Hz,", evsel->attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			   "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			   nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				     ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread->comm_set ? thread__comm_str(thread) : ""),
					     thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}
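/*
 * Editor's note (usage sketch): parse_filter_percentage() and
 * perf_hist_config() above are typically driven either by a
 * --percentage command line option or from the [hist] section of
 * ~/.perfconfig, e.g.:
 *
 *	[hist]
 *		percentage = absolute
 *
 * which ends up calling perf_hist_config("hist.percentage", "absolute").
 */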