// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "kvm-stat.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
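
/*
 * Grow the cached per-column widths so that everything recorded in this
 * entry (symbol names, DSO names, branch/mem targets, srclines, ...) will
 * fit when the histogram is printed.
 */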
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->block_info)
		return;
	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(map__dso(h->ms.map));
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.ms.sym) {
			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.ms.sym) {
			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.ms.sym) {
			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE, symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE, symlen);
		}

		if (h->mem_info->iaddr.ms.sym) {
			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		}

		if (h->mem_info->daddr.ms.map) {
			symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP, 6);
	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);

	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);
	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

	if (h->cgroup) {
		const char *cgrp_name = "unknown";
		struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
						   h->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;

		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
	}
}
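
/* Recompute the column widths from the first 'max_rows' unfiltered entries. */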
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period += period;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->weight1 += src->weight1;
	dest->weight2 += src->weight2;
	dest->weight3 += src->weight3;
	dest->nr_events += src->nr_events;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	he_stat->weight1 = (he_stat->weight1 * 7) / 8;
	he_stat->weight2 = (he_stat->weight2 * 7) / 8;
	he_stat->weight3 = (he_stat->weight3 * 7) / 8;
}
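
/*
 * Exponential decay used by 'perf top': on each refresh every entry keeps
 * 7/8 of its period (e.g. a period of 800 decays to 700), so idle entries
 * age out and are deleted once their period reaches zero.
 */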
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */
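
/*
 * Initialize a new entry from 'template', turning everything the template
 * merely borrows into owned state: new references on maps and thread,
 * copies of branch info, raw data and srcline.  On failure all partially
 * acquired resources are released again.
 */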
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	he->ms.maps = maps__get(he->ms.maps);
	he->ms.map = map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (part of) an array allocated by
		 * sample__resolve_bstack() and will be freed after the new
		 * entries are added, so we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
		he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
	}

	if (he->mem_info) {
		he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map);
		he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(symbol_conf.res_sample,
					 sizeof(struct res_sample));
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	he->thread = thread__get(he->thread);
	he->hroot_in = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map_symbol__exit(&he->branch_info->from.ms);
		map_symbol__exit(&he->branch_info->to.ms);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map_symbol__exit(&he->mem_info->iaddr.ms);
		map_symbol__exit(&he->mem_info->daddr.ms);
	}
err:
	map_symbol__exit(&he->ms);
	zfree(&he->stat_acc);
	return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
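
/*
 * Look up 'entry' in the input tree and merge the new sample into an
 * existing node when all sort keys match; otherwise allocate and link a
 * new hist_entry.
 */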
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       const struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);
		if (!cmp) {
			if (sample_self) {
				he_stat__add_stat(&he->stat, &entry->stat);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			kvm_info__zput(entry->kvm_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
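
/*
 * Helpers for keeping a bounded set of example samples per entry:
 * random_max() returns an unbiased index in [0, high) (a plain
 * 'random() % high' would be biased whenever the range does not divide
 * evenly), and hists__res_sample() first fills the res_samples[] slots
 * and then starts overwriting randomly chosen ones.
 */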
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;

	for (;;) {
		unsigned r = random();

		if (r >= thresh)
			return r % high;
	}
}

static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct kvm_info *ki,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.cgroup = sample->cgroup,
		.ms = {
			.maps	= al->maps,
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.code_page_size = sample->code_page_size,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight1 = sample->weight,
			.weight2 = sample->ins_lat,
			.weight3 = sample->p_stage_cyc,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.kvm_info = ki,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
		.weight = sample->weight,
		.ins_lat = sample->ins_lat,
		.p_stage_cyc = sample->p_stage_cyc,
		.simd_flags = sample->simd_flags,
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct kvm_info *ki,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct kvm_info *ki,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}
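
/*
 * hist_entry_iter callbacks: every sample goes through prepare_entry,
 * add_single_entry, then a next_entry/add_next_entry loop, and finally
 * finish_entry (see hist_entry_iter__add() below).  The nop variants are
 * for iterators that only ever produce a single entry per sample.
 */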
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight and this is indirectly achieved by
	 * passing period=weight here and in he_stat__add_period().
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	maps__put(al->maps);
	al->maps = maps__get(bi[i].to.ms.maps);
	map__put(al->map);
	al->map = map__get(bi[i].to.ms.map);
	al->sym = bi[i].to.ms.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      NULL, sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
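
/*
 * Cumulative mode (perf report --children): in addition to the entry for
 * the sample itself, an entry is accumulated for every caller on its
 * callchain, with he_cache making sure recursive or cyclic chains are
 * only accounted once per sample.
 */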
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;
	struct callchain_cursor *cursor = get_tls_callchain_cursor();

	if (cursor == NULL)
		return -ENOMEM;

	callchain_cursor_commit(cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * accumulated only once and no entry can exceed 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(get_tls_callchain_cursor());

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(get_tls_callchain_cursor());
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
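
/*
 * Fast inequality check on the symbols (or on the raw ip when both are
 * missing), used below to skip the full hist_entry__cmp() in the common
 * case when scanning for duplicates.
 */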
static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
			   struct hist_entry *right)
{
	struct symbol *sym_l = left->ms.sym;
	struct symbol *sym_r = right->ms.sym;

	if (!sym_l && !sym_r)
		return left->ip != right->ip;

	return !!_sort__sym_cmp(sym_l, sym_r);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
	bool fast = hists__has(he_tmp.hists, sym);

	if (tls_cursor == NULL)
		return -ENOMEM;

	callchain_cursor_snapshot(&cursor, tls_cursor);

	callchain_cursor_advance(tls_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		/*
		 * In most cases there are no duplicate entries in the
		 * callchain and the symbols are usually different, so do a
		 * quick check on the symbols first.
		 */
		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
			continue;

		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      NULL, sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map_symbol__exit(&he->ms);

	if (he->branch_info) {
		map_symbol__exit(&he->branch_info->from.ms);
		map_symbol__exit(&he->branch_info->to.ms);
		zfree_srcline(&he->branch_info->srcline_from);
		zfree_srcline(&he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map_symbol__exit(&he->mem_info->iaddr.ms);
		map_symbol__exit(&he->mem_info->daddr.ms);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	if (he->kvm_info)
		kvm_info__zput(he->kvm_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	zfree_srcline(&he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}
static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by its own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to the parents.  The marker bit was
		 * already set by default so it only needs to be cleared
		 * for non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			struct callchain_cursor *cursor = get_tls_callchain_cursor();

			if (cursor == NULL)
				return -1;

			callchain_cursor_reset(cursor);
			if (callchain_merge(cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				struct callchain_cursor *cursor = get_tls_callchain_cursor();

				if (cursor != NULL) {
					callchain_cursor_reset(cursor);
					if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
						ret = -1;
				} else {
					ret = 0;
				}
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}

struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}
static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * Recalculate the total period using top-level entries only,
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have the sum of both.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (fmt->init)
			fmt->init(fmt, he);
	}
}
static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	/* update column width of dynamic entries */
	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (fmt->init)
			fmt->init(fmt, he);
	}
}
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
			     hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}
struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since a filter in a lower
	 * hierarchy can change periods in an upper hierarchy.
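	 *
	 * The loop below therefore detaches every top-level entry from
	 * hists->entries and re-inserts it (recursively, via
	 * resort_filtered_entry()) into a fresh tree in the new sort order.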
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

static void hists_stats__inc(struct hists_stats *stats)
{
	++stats->nr_samples;
}

void hists__inc_nr_events(struct hists *hists)
{
	hists_stats__inc(&hists->stats);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	hists_stats__inc(&hists->stats);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
{
	hists->stats.nr_lost_samples += lost;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
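
/*
 * Hierarchy counterpart of hists__add_dummy_entry(): insert a zero-period
 * dummy entry into the given hroot, comparing entries with the hpp sort
 * list used at this level of the hierarchy.
 */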
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
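
/*
 * hists__match() above and hists__link() below are used together by
 * consumers such as 'perf diff': first match entries present in both
 * hists, then link dummy entries for the ones that exist only in 'other',
 * so both outputs can be iterated in lock step.
 */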
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* parent is set on the dummy leader only, not on pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}
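
/*
 * Accumulate per-branch cycle counts from a sampled branch stack so that
 * annotation can later derive IPC.  Branches are stored newest first, so
 * the stack is walked backwards to visit them in program order.
 */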
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
			  u64 *total_cycles)
{
	struct branch_info *bi;
	struct branch_entry *entries = perf_sample__branch_entries(sample);

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && entries[0].flags.cycles) {
		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (int i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;

				if (total_cycles)
					*total_cycles += bi[i].flags.cycles;
			}
			for (unsigned int i = 0; i < bs->nr; i++) {
				map_symbol__exit(&bi[i].to.ms);
				map_symbol__exit(&bi[i].from.ms);
			}
			free(bi);
		}
	}
}

size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
				 bool skip_empty)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
			continue;

		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
		if (hists->stats.nr_samples)
			ret += fprintf(fp, "%16s events: %10d\n",
				       "SAMPLE", hists->stats.nr_samples);
		if (hists->stats.nr_lost_samples)
			ret += fprintf(fp, "%16s events: %10d\n",
				       "LOST_SAMPLES", hists->stats.nr_lost_samples);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
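
/*
 * Format the histogram browser title line, e.g.:
 *
 *   Samples: 5K of event 'cycles', Event count (approx.): 5123456789
 *
 * with any active UID/thread/DSO/socket filters appended at the end.
 */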
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_samples;
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_samples;
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				     ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread__comm_set(thread) ?
					      thread__comm_str(thread) : ""),
					     thread__tid(thread));
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	mutex_init(&hists->lock);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

int hists__init(void)
{
	int err = evsel__object_config(sizeof(struct hists_evsel),
				       hists_evsel__init, hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}