1 /* 2 * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com> 3 * 4 * Handle the callchains from the stream in an ad-hoc radix tree and then 5 * sort them in an rbtree. 6 * 7 * Using a radix for code path provides a fast retrieval and factorizes 8 * memory use. Also that lets us use the paths in a hierarchical graph view. 9 * 10 */ 11 12 #include <stdlib.h> 13 #include <stdio.h> 14 #include <stdbool.h> 15 #include <errno.h> 16 #include <math.h> 17 18 #include "asm/bug.h" 19 20 #include "hist.h" 21 #include "util.h" 22 #include "sort.h" 23 #include "machine.h" 24 #include "callchain.h" 25 26 __thread struct callchain_cursor callchain_cursor; 27 28 int parse_callchain_record_opt(const char *arg, struct callchain_param *param) 29 { 30 return parse_callchain_record(arg, param); 31 } 32 33 static int parse_callchain_mode(const char *value) 34 { 35 if (!strncmp(value, "graph", strlen(value))) { 36 callchain_param.mode = CHAIN_GRAPH_ABS; 37 return 0; 38 } 39 if (!strncmp(value, "flat", strlen(value))) { 40 callchain_param.mode = CHAIN_FLAT; 41 return 0; 42 } 43 if (!strncmp(value, "fractal", strlen(value))) { 44 callchain_param.mode = CHAIN_GRAPH_REL; 45 return 0; 46 } 47 if (!strncmp(value, "folded", strlen(value))) { 48 callchain_param.mode = CHAIN_FOLDED; 49 return 0; 50 } 51 return -1; 52 } 53 54 static int parse_callchain_order(const char *value) 55 { 56 if (!strncmp(value, "caller", strlen(value))) { 57 callchain_param.order = ORDER_CALLER; 58 callchain_param.order_set = true; 59 return 0; 60 } 61 if (!strncmp(value, "callee", strlen(value))) { 62 callchain_param.order = ORDER_CALLEE; 63 callchain_param.order_set = true; 64 return 0; 65 } 66 return -1; 67 } 68 69 static int parse_callchain_sort_key(const char *value) 70 { 71 if (!strncmp(value, "function", strlen(value))) { 72 callchain_param.key = CCKEY_FUNCTION; 73 return 0; 74 } 75 if (!strncmp(value, "address", strlen(value))) { 76 callchain_param.key = CCKEY_ADDRESS; 77 return 0; 78 } 79 if (!strncmp(value, "branch", strlen(value))) { 80 callchain_param.branch_callstack = 1; 81 return 0; 82 } 83 return -1; 84 } 85 86 static int parse_callchain_value(const char *value) 87 { 88 if (!strncmp(value, "percent", strlen(value))) { 89 callchain_param.value = CCVAL_PERCENT; 90 return 0; 91 } 92 if (!strncmp(value, "period", strlen(value))) { 93 callchain_param.value = CCVAL_PERIOD; 94 return 0; 95 } 96 if (!strncmp(value, "count", strlen(value))) { 97 callchain_param.value = CCVAL_COUNT; 98 return 0; 99 } 100 return -1; 101 } 102 103 static int 104 __parse_callchain_report_opt(const char *arg, bool allow_record_opt) 105 { 106 char *tok; 107 char *endptr; 108 bool minpcnt_set = false; 109 bool record_opt_set = false; 110 bool try_stack_size = false; 111 112 callchain_param.enabled = true; 113 symbol_conf.use_callchain = true; 114 115 if (!arg) 116 return 0; 117 118 while ((tok = strtok((char *)arg, ",")) != NULL) { 119 if (!strncmp(tok, "none", strlen(tok))) { 120 callchain_param.mode = CHAIN_NONE; 121 callchain_param.enabled = false; 122 symbol_conf.use_callchain = false; 123 return 0; 124 } 125 126 if (!parse_callchain_mode(tok) || 127 !parse_callchain_order(tok) || 128 !parse_callchain_sort_key(tok) || 129 !parse_callchain_value(tok)) { 130 /* parsing ok - move on to the next */ 131 try_stack_size = false; 132 goto next; 133 } else if (allow_record_opt && !record_opt_set) { 134 if (parse_callchain_record(tok, &callchain_param)) 135 goto try_numbers; 136 137 /* assume that number followed by 'dwarf' is stack size */ 138 if (callchain_param.record_mode == CALLCHAIN_DWARF) 139 try_stack_size = true; 140 141 record_opt_set = true; 142 goto next; 143 } 144 145 try_numbers: 146 if (try_stack_size) { 147 unsigned long size = 0; 148 149 if (get_stack_size(tok, &size) < 0) 150 return -1; 151 callchain_param.dump_size = size; 152 try_stack_size = false; 153 } else if (!minpcnt_set) { 154 /* try to get the min percent */ 155 callchain_param.min_percent = strtod(tok, &endptr); 156 if (tok == endptr) 157 return -1; 158 minpcnt_set = true; 159 } else { 160 /* try print limit at last */ 161 callchain_param.print_limit = strtoul(tok, &endptr, 0); 162 if (tok == endptr) 163 return -1; 164 } 165 next: 166 arg = NULL; 167 } 168 169 if (callchain_register_param(&callchain_param) < 0) { 170 pr_err("Can't register callchain params\n"); 171 return -1; 172 } 173 return 0; 174 } 175 176 int parse_callchain_report_opt(const char *arg) 177 { 178 return __parse_callchain_report_opt(arg, false); 179 } 180 181 int parse_callchain_top_opt(const char *arg) 182 { 183 return __parse_callchain_report_opt(arg, true); 184 } 185 186 int perf_callchain_config(const char *var, const char *value) 187 { 188 char *endptr; 189 190 if (prefixcmp(var, "call-graph.")) 191 return 0; 192 var += sizeof("call-graph.") - 1; 193 194 if (!strcmp(var, "record-mode")) 195 return parse_callchain_record_opt(value, &callchain_param); 196 if (!strcmp(var, "dump-size")) { 197 unsigned long size = 0; 198 int ret; 199 200 ret = get_stack_size(value, &size); 201 callchain_param.dump_size = size; 202 203 return ret; 204 } 205 if (!strcmp(var, "print-type")) 206 return parse_callchain_mode(value); 207 if (!strcmp(var, "order")) 208 return parse_callchain_order(value); 209 if (!strcmp(var, "sort-key")) 210 return parse_callchain_sort_key(value); 211 if (!strcmp(var, "threshold")) { 212 callchain_param.min_percent = strtod(value, &endptr); 213 if (value == endptr) 214 return -1; 215 } 216 if (!strcmp(var, "print-limit")) { 217 callchain_param.print_limit = strtod(value, &endptr); 218 if (value == endptr) 219 return -1; 220 } 221 222 return 0; 223 } 224 225 static void 226 rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, 227 enum chain_mode mode) 228 { 229 struct rb_node **p = &root->rb_node; 230 struct rb_node *parent = NULL; 231 struct callchain_node *rnode; 232 u64 chain_cumul = callchain_cumul_hits(chain); 233 234 while (*p) { 235 u64 rnode_cumul; 236 237 parent = *p; 238 rnode = rb_entry(parent, struct callchain_node, rb_node); 239 rnode_cumul = callchain_cumul_hits(rnode); 240 241 switch (mode) { 242 case CHAIN_FLAT: 243 case CHAIN_FOLDED: 244 if (rnode->hit < chain->hit) 245 p = &(*p)->rb_left; 246 else 247 p = &(*p)->rb_right; 248 break; 249 case CHAIN_GRAPH_ABS: /* Falldown */ 250 case CHAIN_GRAPH_REL: 251 if (rnode_cumul < chain_cumul) 252 p = &(*p)->rb_left; 253 else 254 p = &(*p)->rb_right; 255 break; 256 case CHAIN_NONE: 257 default: 258 break; 259 } 260 } 261 262 rb_link_node(&chain->rb_node, parent, p); 263 rb_insert_color(&chain->rb_node, root); 264 } 265 266 static void 267 __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node, 268 u64 min_hit) 269 { 270 struct rb_node *n; 271 struct callchain_node *child; 272 273 n = rb_first(&node->rb_root_in); 274 while (n) { 275 child = rb_entry(n, struct callchain_node, rb_node_in); 276 n = rb_next(n); 277 278 __sort_chain_flat(rb_root, child, min_hit); 279 } 280 281 if (node->hit && node->hit >= min_hit) 282 rb_insert_callchain(rb_root, node, CHAIN_FLAT); 283 } 284 285 /* 286 * Once we get every callchains from the stream, we can now 287 * sort them by hit 288 */ 289 static void 290 sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root, 291 u64 min_hit, struct callchain_param *param __maybe_unused) 292 { 293 *rb_root = RB_ROOT; 294 __sort_chain_flat(rb_root, &root->node, min_hit); 295 } 296 297 static void __sort_chain_graph_abs(struct callchain_node *node, 298 u64 min_hit) 299 { 300 struct rb_node *n; 301 struct callchain_node *child; 302 303 node->rb_root = RB_ROOT; 304 n = rb_first(&node->rb_root_in); 305 306 while (n) { 307 child = rb_entry(n, struct callchain_node, rb_node_in); 308 n = rb_next(n); 309 310 __sort_chain_graph_abs(child, min_hit); 311 if (callchain_cumul_hits(child) >= min_hit) 312 rb_insert_callchain(&node->rb_root, child, 313 CHAIN_GRAPH_ABS); 314 } 315 } 316 317 static void 318 sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root, 319 u64 min_hit, struct callchain_param *param __maybe_unused) 320 { 321 __sort_chain_graph_abs(&chain_root->node, min_hit); 322 rb_root->rb_node = chain_root->node.rb_root.rb_node; 323 } 324 325 static void __sort_chain_graph_rel(struct callchain_node *node, 326 double min_percent) 327 { 328 struct rb_node *n; 329 struct callchain_node *child; 330 u64 min_hit; 331 332 node->rb_root = RB_ROOT; 333 min_hit = ceil(node->children_hit * min_percent); 334 335 n = rb_first(&node->rb_root_in); 336 while (n) { 337 child = rb_entry(n, struct callchain_node, rb_node_in); 338 n = rb_next(n); 339 340 __sort_chain_graph_rel(child, min_percent); 341 if (callchain_cumul_hits(child) >= min_hit) 342 rb_insert_callchain(&node->rb_root, child, 343 CHAIN_GRAPH_REL); 344 } 345 } 346 347 static void 348 sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root, 349 u64 min_hit __maybe_unused, struct callchain_param *param) 350 { 351 __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0); 352 rb_root->rb_node = chain_root->node.rb_root.rb_node; 353 } 354 355 int callchain_register_param(struct callchain_param *param) 356 { 357 switch (param->mode) { 358 case CHAIN_GRAPH_ABS: 359 param->sort = sort_chain_graph_abs; 360 break; 361 case CHAIN_GRAPH_REL: 362 param->sort = sort_chain_graph_rel; 363 break; 364 case CHAIN_FLAT: 365 case CHAIN_FOLDED: 366 param->sort = sort_chain_flat; 367 break; 368 case CHAIN_NONE: 369 default: 370 return -1; 371 } 372 return 0; 373 } 374 375 /* 376 * Create a child for a parent. If inherit_children, then the new child 377 * will become the new parent of it's parent children 378 */ 379 static struct callchain_node * 380 create_child(struct callchain_node *parent, bool inherit_children) 381 { 382 struct callchain_node *new; 383 384 new = zalloc(sizeof(*new)); 385 if (!new) { 386 perror("not enough memory to create child for code path tree"); 387 return NULL; 388 } 389 new->parent = parent; 390 INIT_LIST_HEAD(&new->val); 391 INIT_LIST_HEAD(&new->parent_val); 392 393 if (inherit_children) { 394 struct rb_node *n; 395 struct callchain_node *child; 396 397 new->rb_root_in = parent->rb_root_in; 398 parent->rb_root_in = RB_ROOT; 399 400 n = rb_first(&new->rb_root_in); 401 while (n) { 402 child = rb_entry(n, struct callchain_node, rb_node_in); 403 child->parent = new; 404 n = rb_next(n); 405 } 406 407 /* make it the first child */ 408 rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node); 409 rb_insert_color(&new->rb_node_in, &parent->rb_root_in); 410 } 411 412 return new; 413 } 414 415 416 /* 417 * Fill the node with callchain values 418 */ 419 static int 420 fill_node(struct callchain_node *node, struct callchain_cursor *cursor) 421 { 422 struct callchain_cursor_node *cursor_node; 423 424 node->val_nr = cursor->nr - cursor->pos; 425 if (!node->val_nr) 426 pr_warning("Warning: empty node in callchain tree\n"); 427 428 cursor_node = callchain_cursor_current(cursor); 429 430 while (cursor_node) { 431 struct callchain_list *call; 432 433 call = zalloc(sizeof(*call)); 434 if (!call) { 435 perror("not enough memory for the code path tree"); 436 return -1; 437 } 438 call->ip = cursor_node->ip; 439 call->ms.sym = cursor_node->sym; 440 call->ms.map = map__get(cursor_node->map); 441 442 if (cursor_node->branch) { 443 call->branch_count = 1; 444 445 if (cursor_node->branch_flags.predicted) 446 call->predicted_count = 1; 447 448 if (cursor_node->branch_flags.abort) 449 call->abort_count = 1; 450 451 call->cycles_count = cursor_node->branch_flags.cycles; 452 call->iter_count = cursor_node->nr_loop_iter; 453 call->samples_count = cursor_node->samples; 454 } 455 456 list_add_tail(&call->list, &node->val); 457 458 callchain_cursor_advance(cursor); 459 cursor_node = callchain_cursor_current(cursor); 460 } 461 return 0; 462 } 463 464 static struct callchain_node * 465 add_child(struct callchain_node *parent, 466 struct callchain_cursor *cursor, 467 u64 period) 468 { 469 struct callchain_node *new; 470 471 new = create_child(parent, false); 472 if (new == NULL) 473 return NULL; 474 475 if (fill_node(new, cursor) < 0) { 476 struct callchain_list *call, *tmp; 477 478 list_for_each_entry_safe(call, tmp, &new->val, list) { 479 list_del(&call->list); 480 map__zput(call->ms.map); 481 free(call); 482 } 483 free(new); 484 return NULL; 485 } 486 487 new->children_hit = 0; 488 new->hit = period; 489 new->children_count = 0; 490 new->count = 1; 491 return new; 492 } 493 494 enum match_result { 495 MATCH_ERROR = -1, 496 MATCH_EQ, 497 MATCH_LT, 498 MATCH_GT, 499 }; 500 501 static enum match_result match_chain(struct callchain_cursor_node *node, 502 struct callchain_list *cnode) 503 { 504 struct symbol *sym = node->sym; 505 u64 left, right; 506 507 if (cnode->ms.sym && sym && 508 callchain_param.key == CCKEY_FUNCTION) { 509 left = cnode->ms.sym->start; 510 right = sym->start; 511 } else { 512 left = cnode->ip; 513 right = node->ip; 514 } 515 516 if (left == right) { 517 if (node->branch) { 518 cnode->branch_count++; 519 520 if (node->branch_flags.predicted) 521 cnode->predicted_count++; 522 523 if (node->branch_flags.abort) 524 cnode->abort_count++; 525 526 cnode->cycles_count += node->branch_flags.cycles; 527 cnode->iter_count += node->nr_loop_iter; 528 cnode->samples_count += node->samples; 529 } 530 531 return MATCH_EQ; 532 } 533 534 return left > right ? MATCH_GT : MATCH_LT; 535 } 536 537 /* 538 * Split the parent in two parts (a new child is created) and 539 * give a part of its callchain to the created child. 540 * Then create another child to host the given callchain of new branch 541 */ 542 static int 543 split_add_child(struct callchain_node *parent, 544 struct callchain_cursor *cursor, 545 struct callchain_list *to_split, 546 u64 idx_parents, u64 idx_local, u64 period) 547 { 548 struct callchain_node *new; 549 struct list_head *old_tail; 550 unsigned int idx_total = idx_parents + idx_local; 551 552 /* split */ 553 new = create_child(parent, true); 554 if (new == NULL) 555 return -1; 556 557 /* split the callchain and move a part to the new child */ 558 old_tail = parent->val.prev; 559 list_del_range(&to_split->list, old_tail); 560 new->val.next = &to_split->list; 561 new->val.prev = old_tail; 562 to_split->list.prev = &new->val; 563 old_tail->next = &new->val; 564 565 /* split the hits */ 566 new->hit = parent->hit; 567 new->children_hit = parent->children_hit; 568 parent->children_hit = callchain_cumul_hits(new); 569 new->val_nr = parent->val_nr - idx_local; 570 parent->val_nr = idx_local; 571 new->count = parent->count; 572 new->children_count = parent->children_count; 573 parent->children_count = callchain_cumul_counts(new); 574 575 /* create a new child for the new branch if any */ 576 if (idx_total < cursor->nr) { 577 struct callchain_node *first; 578 struct callchain_list *cnode; 579 struct callchain_cursor_node *node; 580 struct rb_node *p, **pp; 581 582 parent->hit = 0; 583 parent->children_hit += period; 584 parent->count = 0; 585 parent->children_count += 1; 586 587 node = callchain_cursor_current(cursor); 588 new = add_child(parent, cursor, period); 589 if (new == NULL) 590 return -1; 591 592 /* 593 * This is second child since we moved parent's children 594 * to new (first) child above. 595 */ 596 p = parent->rb_root_in.rb_node; 597 first = rb_entry(p, struct callchain_node, rb_node_in); 598 cnode = list_first_entry(&first->val, struct callchain_list, 599 list); 600 601 if (match_chain(node, cnode) == MATCH_LT) 602 pp = &p->rb_left; 603 else 604 pp = &p->rb_right; 605 606 rb_link_node(&new->rb_node_in, p, pp); 607 rb_insert_color(&new->rb_node_in, &parent->rb_root_in); 608 } else { 609 parent->hit = period; 610 parent->count = 1; 611 } 612 return 0; 613 } 614 615 static enum match_result 616 append_chain(struct callchain_node *root, 617 struct callchain_cursor *cursor, 618 u64 period); 619 620 static int 621 append_chain_children(struct callchain_node *root, 622 struct callchain_cursor *cursor, 623 u64 period) 624 { 625 struct callchain_node *rnode; 626 struct callchain_cursor_node *node; 627 struct rb_node **p = &root->rb_root_in.rb_node; 628 struct rb_node *parent = NULL; 629 630 node = callchain_cursor_current(cursor); 631 if (!node) 632 return -1; 633 634 /* lookup in childrens */ 635 while (*p) { 636 enum match_result ret; 637 638 parent = *p; 639 rnode = rb_entry(parent, struct callchain_node, rb_node_in); 640 641 /* If at least first entry matches, rely to children */ 642 ret = append_chain(rnode, cursor, period); 643 if (ret == MATCH_EQ) 644 goto inc_children_hit; 645 if (ret == MATCH_ERROR) 646 return -1; 647 648 if (ret == MATCH_LT) 649 p = &parent->rb_left; 650 else 651 p = &parent->rb_right; 652 } 653 /* nothing in children, add to the current node */ 654 rnode = add_child(root, cursor, period); 655 if (rnode == NULL) 656 return -1; 657 658 rb_link_node(&rnode->rb_node_in, parent, p); 659 rb_insert_color(&rnode->rb_node_in, &root->rb_root_in); 660 661 inc_children_hit: 662 root->children_hit += period; 663 root->children_count++; 664 return 0; 665 } 666 667 static enum match_result 668 append_chain(struct callchain_node *root, 669 struct callchain_cursor *cursor, 670 u64 period) 671 { 672 struct callchain_list *cnode; 673 u64 start = cursor->pos; 674 bool found = false; 675 u64 matches; 676 enum match_result cmp = MATCH_ERROR; 677 678 /* 679 * Lookup in the current node 680 * If we have a symbol, then compare the start to match 681 * anywhere inside a function, unless function 682 * mode is disabled. 683 */ 684 list_for_each_entry(cnode, &root->val, list) { 685 struct callchain_cursor_node *node; 686 687 node = callchain_cursor_current(cursor); 688 if (!node) 689 break; 690 691 cmp = match_chain(node, cnode); 692 if (cmp != MATCH_EQ) 693 break; 694 695 found = true; 696 697 callchain_cursor_advance(cursor); 698 } 699 700 /* matches not, relay no the parent */ 701 if (!found) { 702 WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n"); 703 return cmp; 704 } 705 706 matches = cursor->pos - start; 707 708 /* we match only a part of the node. Split it and add the new chain */ 709 if (matches < root->val_nr) { 710 if (split_add_child(root, cursor, cnode, start, matches, 711 period) < 0) 712 return MATCH_ERROR; 713 714 return MATCH_EQ; 715 } 716 717 /* we match 100% of the path, increment the hit */ 718 if (matches == root->val_nr && cursor->pos == cursor->nr) { 719 root->hit += period; 720 root->count++; 721 return MATCH_EQ; 722 } 723 724 /* We match the node and still have a part remaining */ 725 if (append_chain_children(root, cursor, period) < 0) 726 return MATCH_ERROR; 727 728 return MATCH_EQ; 729 } 730 731 int callchain_append(struct callchain_root *root, 732 struct callchain_cursor *cursor, 733 u64 period) 734 { 735 if (!cursor->nr) 736 return 0; 737 738 callchain_cursor_commit(cursor); 739 740 if (append_chain_children(&root->node, cursor, period) < 0) 741 return -1; 742 743 if (cursor->nr > root->max_depth) 744 root->max_depth = cursor->nr; 745 746 return 0; 747 } 748 749 static int 750 merge_chain_branch(struct callchain_cursor *cursor, 751 struct callchain_node *dst, struct callchain_node *src) 752 { 753 struct callchain_cursor_node **old_last = cursor->last; 754 struct callchain_node *child; 755 struct callchain_list *list, *next_list; 756 struct rb_node *n; 757 int old_pos = cursor->nr; 758 int err = 0; 759 760 list_for_each_entry_safe(list, next_list, &src->val, list) { 761 callchain_cursor_append(cursor, list->ip, 762 list->ms.map, list->ms.sym, 763 false, NULL, 0, 0); 764 list_del(&list->list); 765 map__zput(list->ms.map); 766 free(list); 767 } 768 769 if (src->hit) { 770 callchain_cursor_commit(cursor); 771 if (append_chain_children(dst, cursor, src->hit) < 0) 772 return -1; 773 } 774 775 n = rb_first(&src->rb_root_in); 776 while (n) { 777 child = container_of(n, struct callchain_node, rb_node_in); 778 n = rb_next(n); 779 rb_erase(&child->rb_node_in, &src->rb_root_in); 780 781 err = merge_chain_branch(cursor, dst, child); 782 if (err) 783 break; 784 785 free(child); 786 } 787 788 cursor->nr = old_pos; 789 cursor->last = old_last; 790 791 return err; 792 } 793 794 int callchain_merge(struct callchain_cursor *cursor, 795 struct callchain_root *dst, struct callchain_root *src) 796 { 797 return merge_chain_branch(cursor, &dst->node, &src->node); 798 } 799 800 int callchain_cursor_append(struct callchain_cursor *cursor, 801 u64 ip, struct map *map, struct symbol *sym, 802 bool branch, struct branch_flags *flags, 803 int nr_loop_iter, int samples) 804 { 805 struct callchain_cursor_node *node = *cursor->last; 806 807 if (!node) { 808 node = calloc(1, sizeof(*node)); 809 if (!node) 810 return -ENOMEM; 811 812 *cursor->last = node; 813 } 814 815 node->ip = ip; 816 map__zput(node->map); 817 node->map = map__get(map); 818 node->sym = sym; 819 node->branch = branch; 820 node->nr_loop_iter = nr_loop_iter; 821 node->samples = samples; 822 823 if (flags) 824 memcpy(&node->branch_flags, flags, 825 sizeof(struct branch_flags)); 826 827 cursor->nr++; 828 829 cursor->last = &node->next; 830 831 return 0; 832 } 833 834 int sample__resolve_callchain(struct perf_sample *sample, 835 struct callchain_cursor *cursor, struct symbol **parent, 836 struct perf_evsel *evsel, struct addr_location *al, 837 int max_stack) 838 { 839 if (sample->callchain == NULL) 840 return 0; 841 842 if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain || 843 perf_hpp_list.parent) { 844 return thread__resolve_callchain(al->thread, cursor, evsel, sample, 845 parent, al, max_stack); 846 } 847 return 0; 848 } 849 850 int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample) 851 { 852 if (!symbol_conf.use_callchain || sample->callchain == NULL) 853 return 0; 854 return callchain_append(he->callchain, &callchain_cursor, sample->period); 855 } 856 857 int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node, 858 bool hide_unresolved) 859 { 860 al->map = node->map; 861 al->sym = node->sym; 862 if (node->map) 863 al->addr = node->map->map_ip(node->map, node->ip); 864 else 865 al->addr = node->ip; 866 867 if (al->sym == NULL) { 868 if (hide_unresolved) 869 return 0; 870 if (al->map == NULL) 871 goto out; 872 } 873 874 if (al->map->groups == &al->machine->kmaps) { 875 if (machine__is_host(al->machine)) { 876 al->cpumode = PERF_RECORD_MISC_KERNEL; 877 al->level = 'k'; 878 } else { 879 al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL; 880 al->level = 'g'; 881 } 882 } else { 883 if (machine__is_host(al->machine)) { 884 al->cpumode = PERF_RECORD_MISC_USER; 885 al->level = '.'; 886 } else if (perf_guest) { 887 al->cpumode = PERF_RECORD_MISC_GUEST_USER; 888 al->level = 'u'; 889 } else { 890 al->cpumode = PERF_RECORD_MISC_HYPERVISOR; 891 al->level = 'H'; 892 } 893 } 894 895 out: 896 return 1; 897 } 898 899 char *callchain_list__sym_name(struct callchain_list *cl, 900 char *bf, size_t bfsize, bool show_dso) 901 { 902 int printed; 903 904 if (cl->ms.sym) { 905 if (callchain_param.key == CCKEY_ADDRESS && 906 cl->ms.map && !cl->srcline) 907 cl->srcline = get_srcline(cl->ms.map->dso, 908 map__rip_2objdump(cl->ms.map, 909 cl->ip), 910 cl->ms.sym, false); 911 if (cl->srcline) 912 printed = scnprintf(bf, bfsize, "%s %s", 913 cl->ms.sym->name, cl->srcline); 914 else 915 printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name); 916 } else 917 printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip); 918 919 if (show_dso) 920 scnprintf(bf + printed, bfsize - printed, " %s", 921 cl->ms.map ? 922 cl->ms.map->dso->short_name : 923 "unknown"); 924 925 return bf; 926 } 927 928 char *callchain_node__scnprintf_value(struct callchain_node *node, 929 char *bf, size_t bfsize, u64 total) 930 { 931 double percent = 0.0; 932 u64 period = callchain_cumul_hits(node); 933 unsigned count = callchain_cumul_counts(node); 934 935 if (callchain_param.mode == CHAIN_FOLDED) { 936 period = node->hit; 937 count = node->count; 938 } 939 940 switch (callchain_param.value) { 941 case CCVAL_PERIOD: 942 scnprintf(bf, bfsize, "%"PRIu64, period); 943 break; 944 case CCVAL_COUNT: 945 scnprintf(bf, bfsize, "%u", count); 946 break; 947 case CCVAL_PERCENT: 948 default: 949 if (total) 950 percent = period * 100.0 / total; 951 scnprintf(bf, bfsize, "%.2f%%", percent); 952 break; 953 } 954 return bf; 955 } 956 957 int callchain_node__fprintf_value(struct callchain_node *node, 958 FILE *fp, u64 total) 959 { 960 double percent = 0.0; 961 u64 period = callchain_cumul_hits(node); 962 unsigned count = callchain_cumul_counts(node); 963 964 if (callchain_param.mode == CHAIN_FOLDED) { 965 period = node->hit; 966 count = node->count; 967 } 968 969 switch (callchain_param.value) { 970 case CCVAL_PERIOD: 971 return fprintf(fp, "%"PRIu64, period); 972 case CCVAL_COUNT: 973 return fprintf(fp, "%u", count); 974 case CCVAL_PERCENT: 975 default: 976 if (total) 977 percent = period * 100.0 / total; 978 return percent_color_fprintf(fp, "%.2f%%", percent); 979 } 980 return 0; 981 } 982 983 static void callchain_counts_value(struct callchain_node *node, 984 u64 *branch_count, u64 *predicted_count, 985 u64 *abort_count, u64 *cycles_count) 986 { 987 struct callchain_list *clist; 988 989 list_for_each_entry(clist, &node->val, list) { 990 if (branch_count) 991 *branch_count += clist->branch_count; 992 993 if (predicted_count) 994 *predicted_count += clist->predicted_count; 995 996 if (abort_count) 997 *abort_count += clist->abort_count; 998 999 if (cycles_count) 1000 *cycles_count += clist->cycles_count; 1001 } 1002 } 1003 1004 static int callchain_node_branch_counts_cumul(struct callchain_node *node, 1005 u64 *branch_count, 1006 u64 *predicted_count, 1007 u64 *abort_count, 1008 u64 *cycles_count) 1009 { 1010 struct callchain_node *child; 1011 struct rb_node *n; 1012 1013 n = rb_first(&node->rb_root_in); 1014 while (n) { 1015 child = rb_entry(n, struct callchain_node, rb_node_in); 1016 n = rb_next(n); 1017 1018 callchain_node_branch_counts_cumul(child, branch_count, 1019 predicted_count, 1020 abort_count, 1021 cycles_count); 1022 1023 callchain_counts_value(child, branch_count, 1024 predicted_count, abort_count, 1025 cycles_count); 1026 } 1027 1028 return 0; 1029 } 1030 1031 int callchain_branch_counts(struct callchain_root *root, 1032 u64 *branch_count, u64 *predicted_count, 1033 u64 *abort_count, u64 *cycles_count) 1034 { 1035 if (branch_count) 1036 *branch_count = 0; 1037 1038 if (predicted_count) 1039 *predicted_count = 0; 1040 1041 if (abort_count) 1042 *abort_count = 0; 1043 1044 if (cycles_count) 1045 *cycles_count = 0; 1046 1047 return callchain_node_branch_counts_cumul(&root->node, 1048 branch_count, 1049 predicted_count, 1050 abort_count, 1051 cycles_count); 1052 } 1053 1054 static int callchain_counts_printf(FILE *fp, char *bf, int bfsize, 1055 u64 branch_count, u64 predicted_count, 1056 u64 abort_count, u64 cycles_count, 1057 u64 iter_count, u64 samples_count) 1058 { 1059 double predicted_percent = 0.0; 1060 const char *null_str = ""; 1061 char iter_str[32]; 1062 char *str; 1063 u64 cycles = 0; 1064 1065 if (branch_count == 0) { 1066 if (fp) 1067 return fprintf(fp, " (calltrace)"); 1068 1069 return scnprintf(bf, bfsize, " (calltrace)"); 1070 } 1071 1072 if (iter_count && samples_count) { 1073 scnprintf(iter_str, sizeof(iter_str), 1074 ", iterations:%" PRId64 "", 1075 iter_count / samples_count); 1076 str = iter_str; 1077 } else 1078 str = (char *)null_str; 1079 1080 predicted_percent = predicted_count * 100.0 / branch_count; 1081 cycles = cycles_count / branch_count; 1082 1083 if ((predicted_percent >= 100.0) && (abort_count == 0)) { 1084 if (fp) 1085 return fprintf(fp, " (cycles:%" PRId64 "%s)", 1086 cycles, str); 1087 1088 return scnprintf(bf, bfsize, " (cycles:%" PRId64 "%s)", 1089 cycles, str); 1090 } 1091 1092 if ((predicted_percent < 100.0) && (abort_count == 0)) { 1093 if (fp) 1094 return fprintf(fp, 1095 " (predicted:%.1f%%, cycles:%" PRId64 "%s)", 1096 predicted_percent, cycles, str); 1097 1098 return scnprintf(bf, bfsize, 1099 " (predicted:%.1f%%, cycles:%" PRId64 "%s)", 1100 predicted_percent, cycles, str); 1101 } 1102 1103 if (fp) 1104 return fprintf(fp, 1105 " (predicted:%.1f%%, abort:%" PRId64 ", cycles:%" PRId64 "%s)", 1106 predicted_percent, abort_count, cycles, str); 1107 1108 return scnprintf(bf, bfsize, 1109 " (predicted:%.1f%%, abort:%" PRId64 ", cycles:%" PRId64 "%s)", 1110 predicted_percent, abort_count, cycles, str); 1111 } 1112 1113 int callchain_list_counts__printf_value(struct callchain_node *node, 1114 struct callchain_list *clist, 1115 FILE *fp, char *bf, int bfsize) 1116 { 1117 u64 branch_count, predicted_count; 1118 u64 abort_count, cycles_count; 1119 u64 iter_count = 0, samples_count = 0; 1120 1121 branch_count = clist->branch_count; 1122 predicted_count = clist->predicted_count; 1123 abort_count = clist->abort_count; 1124 cycles_count = clist->cycles_count; 1125 1126 if (node) { 1127 struct callchain_list *call; 1128 1129 list_for_each_entry(call, &node->val, list) { 1130 iter_count += call->iter_count; 1131 samples_count += call->samples_count; 1132 } 1133 } 1134 1135 return callchain_counts_printf(fp, bf, bfsize, branch_count, 1136 predicted_count, abort_count, 1137 cycles_count, iter_count, samples_count); 1138 } 1139 1140 static void free_callchain_node(struct callchain_node *node) 1141 { 1142 struct callchain_list *list, *tmp; 1143 struct callchain_node *child; 1144 struct rb_node *n; 1145 1146 list_for_each_entry_safe(list, tmp, &node->parent_val, list) { 1147 list_del(&list->list); 1148 map__zput(list->ms.map); 1149 free(list); 1150 } 1151 1152 list_for_each_entry_safe(list, tmp, &node->val, list) { 1153 list_del(&list->list); 1154 map__zput(list->ms.map); 1155 free(list); 1156 } 1157 1158 n = rb_first(&node->rb_root_in); 1159 while (n) { 1160 child = container_of(n, struct callchain_node, rb_node_in); 1161 n = rb_next(n); 1162 rb_erase(&child->rb_node_in, &node->rb_root_in); 1163 1164 free_callchain_node(child); 1165 free(child); 1166 } 1167 } 1168 1169 void free_callchain(struct callchain_root *root) 1170 { 1171 if (!symbol_conf.use_callchain) 1172 return; 1173 1174 free_callchain_node(&root->node); 1175 } 1176 1177 static u64 decay_callchain_node(struct callchain_node *node) 1178 { 1179 struct callchain_node *child; 1180 struct rb_node *n; 1181 u64 child_hits = 0; 1182 1183 n = rb_first(&node->rb_root_in); 1184 while (n) { 1185 child = container_of(n, struct callchain_node, rb_node_in); 1186 1187 child_hits += decay_callchain_node(child); 1188 n = rb_next(n); 1189 } 1190 1191 node->hit = (node->hit * 7) / 8; 1192 node->children_hit = child_hits; 1193 1194 return node->hit; 1195 } 1196 1197 void decay_callchain(struct callchain_root *root) 1198 { 1199 if (!symbol_conf.use_callchain) 1200 return; 1201 1202 decay_callchain_node(&root->node); 1203 } 1204 1205 int callchain_node__make_parent_list(struct callchain_node *node) 1206 { 1207 struct callchain_node *parent = node->parent; 1208 struct callchain_list *chain, *new; 1209 LIST_HEAD(head); 1210 1211 while (parent) { 1212 list_for_each_entry_reverse(chain, &parent->val, list) { 1213 new = malloc(sizeof(*new)); 1214 if (new == NULL) 1215 goto out; 1216 *new = *chain; 1217 new->has_children = false; 1218 map__get(new->ms.map); 1219 list_add_tail(&new->list, &head); 1220 } 1221 parent = parent->parent; 1222 } 1223 1224 list_for_each_entry_safe_reverse(chain, new, &head, list) 1225 list_move_tail(&chain->list, &node->parent_val); 1226 1227 if (!list_empty(&node->parent_val)) { 1228 chain = list_first_entry(&node->parent_val, struct callchain_list, list); 1229 chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node); 1230 1231 chain = list_first_entry(&node->val, struct callchain_list, list); 1232 chain->has_children = false; 1233 } 1234 return 0; 1235 1236 out: 1237 list_for_each_entry_safe(chain, new, &head, list) { 1238 list_del(&chain->list); 1239 map__zput(chain->ms.map); 1240 free(chain); 1241 } 1242 return -ENOMEM; 1243 } 1244 1245 int callchain_cursor__copy(struct callchain_cursor *dst, 1246 struct callchain_cursor *src) 1247 { 1248 int rc = 0; 1249 1250 callchain_cursor_reset(dst); 1251 callchain_cursor_commit(src); 1252 1253 while (true) { 1254 struct callchain_cursor_node *node; 1255 1256 node = callchain_cursor_current(src); 1257 if (node == NULL) 1258 break; 1259 1260 rc = callchain_cursor_append(dst, node->ip, node->map, node->sym, 1261 node->branch, &node->branch_flags, 1262 node->nr_loop_iter, node->samples); 1263 if (rc) 1264 break; 1265 1266 callchain_cursor_advance(src); 1267 } 1268 1269 return rc; 1270 } 1271