xref: /linux/tools/perf/util/callchain.c (revision 8520a98dbab61e9e340cdfb72dd17ccc8a98961e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
4  *
5  * Handle the callchains from the stream in an ad-hoc radix tree and then
6  * sort them in an rbtree.
7  *
8  * Using a radix for code path provides a fast retrieval and factorizes
9  * memory use. Also that lets us use the paths in a hierarchical graph view.
10  *
11  */
12 
13 #include <inttypes.h>
14 #include <stdlib.h>
15 #include <stdio.h>
16 #include <stdbool.h>
17 #include <errno.h>
18 #include <math.h>
19 #include <linux/string.h>
20 #include <linux/zalloc.h>
21 
22 #include "asm/bug.h"
23 
24 #include "debug.h"
25 #include "hist.h"
26 #include "sort.h"
27 #include "machine.h"
28 #include "map.h"
29 #include "callchain.h"
30 #include "branch.h"
31 #include "symbol.h"
32 #include "../perf.h"
33 
/* Common initializer shared by callchain_param and callchain_param_default. */
#define CALLCHAIN_PARAM_DEFAULT			\
	.mode		= CHAIN_GRAPH_ABS,	\
	.min_percent	= 0.5,			\
	.order		= ORDER_CALLEE,		\
	.key		= CCKEY_FUNCTION,	\
	.value		= CCVAL_PERCENT,	\

/* Live parameters, mutated in place by the option parsers below. */
struct callchain_param callchain_param = {
	CALLCHAIN_PARAM_DEFAULT
};

/*
 * Are there any events using DWARF callchains?
 *
 * I.e.
 *
 * -e cycles/call-graph=dwarf/
 */
bool dwarf_callchain_users;

/* Pristine defaults — presumably kept so callers can compare/reset
 * against the untouched values; confirm against users. */
struct callchain_param callchain_param_default = {
	CALLCHAIN_PARAM_DEFAULT
};

/* Per-thread cursor used while accumulating one sample's resolved frames. */
__thread struct callchain_cursor callchain_cursor;
59 
/*
 * Public entry point for parsing a record-side "--call-graph" string
 * into @param; thin wrapper around parse_callchain_record().
 */
int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
{
	return parse_callchain_record(arg, param);
}
64 
65 static int parse_callchain_mode(const char *value)
66 {
67 	if (!strncmp(value, "graph", strlen(value))) {
68 		callchain_param.mode = CHAIN_GRAPH_ABS;
69 		return 0;
70 	}
71 	if (!strncmp(value, "flat", strlen(value))) {
72 		callchain_param.mode = CHAIN_FLAT;
73 		return 0;
74 	}
75 	if (!strncmp(value, "fractal", strlen(value))) {
76 		callchain_param.mode = CHAIN_GRAPH_REL;
77 		return 0;
78 	}
79 	if (!strncmp(value, "folded", strlen(value))) {
80 		callchain_param.mode = CHAIN_FOLDED;
81 		return 0;
82 	}
83 	return -1;
84 }
85 
86 static int parse_callchain_order(const char *value)
87 {
88 	if (!strncmp(value, "caller", strlen(value))) {
89 		callchain_param.order = ORDER_CALLER;
90 		callchain_param.order_set = true;
91 		return 0;
92 	}
93 	if (!strncmp(value, "callee", strlen(value))) {
94 		callchain_param.order = ORDER_CALLEE;
95 		callchain_param.order_set = true;
96 		return 0;
97 	}
98 	return -1;
99 }
100 
101 static int parse_callchain_sort_key(const char *value)
102 {
103 	if (!strncmp(value, "function", strlen(value))) {
104 		callchain_param.key = CCKEY_FUNCTION;
105 		return 0;
106 	}
107 	if (!strncmp(value, "address", strlen(value))) {
108 		callchain_param.key = CCKEY_ADDRESS;
109 		return 0;
110 	}
111 	if (!strncmp(value, "srcline", strlen(value))) {
112 		callchain_param.key = CCKEY_SRCLINE;
113 		return 0;
114 	}
115 	if (!strncmp(value, "branch", strlen(value))) {
116 		callchain_param.branch_callstack = 1;
117 		return 0;
118 	}
119 	return -1;
120 }
121 
122 static int parse_callchain_value(const char *value)
123 {
124 	if (!strncmp(value, "percent", strlen(value))) {
125 		callchain_param.value = CCVAL_PERCENT;
126 		return 0;
127 	}
128 	if (!strncmp(value, "period", strlen(value))) {
129 		callchain_param.value = CCVAL_PERIOD;
130 		return 0;
131 	}
132 	if (!strncmp(value, "count", strlen(value))) {
133 		callchain_param.value = CCVAL_COUNT;
134 		return 0;
135 	}
136 	return -1;
137 }
138 
139 static int get_stack_size(const char *str, unsigned long *_size)
140 {
141 	char *endptr;
142 	unsigned long size;
143 	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
144 
145 	size = strtoul(str, &endptr, 0);
146 
147 	do {
148 		if (*endptr)
149 			break;
150 
151 		size = round_up(size, sizeof(u64));
152 		if (!size || size > max_size)
153 			break;
154 
155 		*_size = size;
156 		return 0;
157 
158 	} while (0);
159 
160 	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
161 	       max_size, str);
162 	return -1;
163 }
164 
/*
 * Parse a report-side "--call-graph" string: a comma separated list
 * that may contain a print mode, order, sort key, value type, an
 * optional record option (only when @allow_record_opt, i.e. perf top),
 * a minimum percent and a print limit — roughly in that spirit; bare
 * numbers are interpreted positionally.  Returns 0 on success, -1 on
 * parse or registration failure.
 */
static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{
	char *tok;
	char *endptr, *saveptr = NULL;
	bool minpcnt_set = false;
	bool record_opt_set = false;
	bool try_stack_size = false;

	callchain_param.enabled = true;
	symbol_conf.use_callchain = true;

	if (!arg)
		return 0;

	/* NOTE(review): strtok_r() writes into 'arg' despite the const
	 * qualifier on the parameter — callers must pass writable
	 * storage; confirm all call sites do. */
	while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
		if (!strncmp(tok, "none", strlen(tok))) {
			/* "none" disables callchains entirely. */
			callchain_param.mode = CHAIN_NONE;
			callchain_param.enabled = false;
			symbol_conf.use_callchain = false;
			return 0;
		}

		/* Try each keyword class in turn; 0 means recognized. */
		if (!parse_callchain_mode(tok) ||
		    !parse_callchain_order(tok) ||
		    !parse_callchain_sort_key(tok) ||
		    !parse_callchain_value(tok)) {
			/* parsing ok - move on to the next */
			try_stack_size = false;
			goto next;
		} else if (allow_record_opt && !record_opt_set) {
			if (parse_callchain_record(tok, &callchain_param))
				goto try_numbers;

			/* assume that number followed by 'dwarf' is stack size */
			if (callchain_param.record_mode == CALLCHAIN_DWARF)
				try_stack_size = true;

			record_opt_set = true;
			goto next;
		}

try_numbers:
		if (try_stack_size) {
			/* Number right after "dwarf" is the stack dump size. */
			unsigned long size = 0;

			if (get_stack_size(tok, &size) < 0)
				return -1;
			callchain_param.dump_size = size;
			try_stack_size = false;
		} else if (!minpcnt_set) {
			/* try to get the min percent */
			callchain_param.min_percent = strtod(tok, &endptr);
			if (tok == endptr)
				return -1;
			minpcnt_set = true;
		} else {
			/* try print limit at last */
			callchain_param.print_limit = strtoul(tok, &endptr, 0);
			if (tok == endptr)
				return -1;
		}
next:
		arg = NULL;	/* subsequent strtok_r() calls continue the scan */
	}

	if (callchain_register_param(&callchain_param) < 0) {
		pr_err("Can't register callchain params\n");
		return -1;
	}
	return 0;
}
237 
238 int parse_callchain_report_opt(const char *arg)
239 {
240 	return __parse_callchain_report_opt(arg, false);
241 }
242 
243 int parse_callchain_top_opt(const char *arg)
244 {
245 	return __parse_callchain_report_opt(arg, true);
246 }
247 
248 int parse_callchain_record(const char *arg, struct callchain_param *param)
249 {
250 	char *tok, *name, *saveptr = NULL;
251 	char *buf;
252 	int ret = -1;
253 
254 	/* We need buffer that we know we can write to. */
255 	buf = malloc(strlen(arg) + 1);
256 	if (!buf)
257 		return -ENOMEM;
258 
259 	strcpy(buf, arg);
260 
261 	tok = strtok_r((char *)buf, ",", &saveptr);
262 	name = tok ? : (char *)buf;
263 
264 	do {
265 		/* Framepointer style */
266 		if (!strncmp(name, "fp", sizeof("fp"))) {
267 			if (!strtok_r(NULL, ",", &saveptr)) {
268 				param->record_mode = CALLCHAIN_FP;
269 				ret = 0;
270 			} else
271 				pr_err("callchain: No more arguments "
272 				       "needed for --call-graph fp\n");
273 			break;
274 
275 		/* Dwarf style */
276 		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
277 			const unsigned long default_stack_dump_size = 8192;
278 
279 			ret = 0;
280 			param->record_mode = CALLCHAIN_DWARF;
281 			param->dump_size = default_stack_dump_size;
282 			dwarf_callchain_users = true;
283 
284 			tok = strtok_r(NULL, ",", &saveptr);
285 			if (tok) {
286 				unsigned long size = 0;
287 
288 				ret = get_stack_size(tok, &size);
289 				param->dump_size = size;
290 			}
291 		} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
292 			if (!strtok_r(NULL, ",", &saveptr)) {
293 				param->record_mode = CALLCHAIN_LBR;
294 				ret = 0;
295 			} else
296 				pr_err("callchain: No more arguments "
297 					"needed for --call-graph lbr\n");
298 			break;
299 		} else {
300 			pr_err("callchain: Unknown --call-graph option "
301 			       "value: %s\n", arg);
302 			break;
303 		}
304 
305 	} while (0);
306 
307 	free(buf);
308 	return ret;
309 }
310 
311 int perf_callchain_config(const char *var, const char *value)
312 {
313 	char *endptr;
314 
315 	if (!strstarts(var, "call-graph."))
316 		return 0;
317 	var += sizeof("call-graph.") - 1;
318 
319 	if (!strcmp(var, "record-mode"))
320 		return parse_callchain_record_opt(value, &callchain_param);
321 	if (!strcmp(var, "dump-size")) {
322 		unsigned long size = 0;
323 		int ret;
324 
325 		ret = get_stack_size(value, &size);
326 		callchain_param.dump_size = size;
327 
328 		return ret;
329 	}
330 	if (!strcmp(var, "print-type")){
331 		int ret;
332 		ret = parse_callchain_mode(value);
333 		if (ret == -1)
334 			pr_err("Invalid callchain mode: %s\n", value);
335 		return ret;
336 	}
337 	if (!strcmp(var, "order")){
338 		int ret;
339 		ret = parse_callchain_order(value);
340 		if (ret == -1)
341 			pr_err("Invalid callchain order: %s\n", value);
342 		return ret;
343 	}
344 	if (!strcmp(var, "sort-key")){
345 		int ret;
346 		ret = parse_callchain_sort_key(value);
347 		if (ret == -1)
348 			pr_err("Invalid callchain sort key: %s\n", value);
349 		return ret;
350 	}
351 	if (!strcmp(var, "threshold")) {
352 		callchain_param.min_percent = strtod(value, &endptr);
353 		if (value == endptr) {
354 			pr_err("Invalid callchain threshold: %s\n", value);
355 			return -1;
356 		}
357 	}
358 	if (!strcmp(var, "print-limit")) {
359 		callchain_param.print_limit = strtod(value, &endptr);
360 		if (value == endptr) {
361 			pr_err("Invalid callchain print limit: %s\n", value);
362 			return -1;
363 		}
364 	}
365 
366 	return 0;
367 }
368 
369 static void
370 rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
371 		    enum chain_mode mode)
372 {
373 	struct rb_node **p = &root->rb_node;
374 	struct rb_node *parent = NULL;
375 	struct callchain_node *rnode;
376 	u64 chain_cumul = callchain_cumul_hits(chain);
377 
378 	while (*p) {
379 		u64 rnode_cumul;
380 
381 		parent = *p;
382 		rnode = rb_entry(parent, struct callchain_node, rb_node);
383 		rnode_cumul = callchain_cumul_hits(rnode);
384 
385 		switch (mode) {
386 		case CHAIN_FLAT:
387 		case CHAIN_FOLDED:
388 			if (rnode->hit < chain->hit)
389 				p = &(*p)->rb_left;
390 			else
391 				p = &(*p)->rb_right;
392 			break;
393 		case CHAIN_GRAPH_ABS: /* Falldown */
394 		case CHAIN_GRAPH_REL:
395 			if (rnode_cumul < chain_cumul)
396 				p = &(*p)->rb_left;
397 			else
398 				p = &(*p)->rb_right;
399 			break;
400 		case CHAIN_NONE:
401 		default:
402 			break;
403 		}
404 	}
405 
406 	rb_link_node(&chain->rb_node, parent, p);
407 	rb_insert_color(&chain->rb_node, root);
408 }
409 
410 static void
411 __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
412 		  u64 min_hit)
413 {
414 	struct rb_node *n;
415 	struct callchain_node *child;
416 
417 	n = rb_first(&node->rb_root_in);
418 	while (n) {
419 		child = rb_entry(n, struct callchain_node, rb_node_in);
420 		n = rb_next(n);
421 
422 		__sort_chain_flat(rb_root, child, min_hit);
423 	}
424 
425 	if (node->hit && node->hit >= min_hit)
426 		rb_insert_callchain(rb_root, node, CHAIN_FLAT);
427 }
428 
429 /*
430  * Once we get every callchains from the stream, we can now
431  * sort them by hit
432  */
433 static void
434 sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
435 		u64 min_hit, struct callchain_param *param __maybe_unused)
436 {
437 	*rb_root = RB_ROOT;
438 	__sort_chain_flat(rb_root, &root->node, min_hit);
439 }
440 
441 static void __sort_chain_graph_abs(struct callchain_node *node,
442 				   u64 min_hit)
443 {
444 	struct rb_node *n;
445 	struct callchain_node *child;
446 
447 	node->rb_root = RB_ROOT;
448 	n = rb_first(&node->rb_root_in);
449 
450 	while (n) {
451 		child = rb_entry(n, struct callchain_node, rb_node_in);
452 		n = rb_next(n);
453 
454 		__sort_chain_graph_abs(child, min_hit);
455 		if (callchain_cumul_hits(child) >= min_hit)
456 			rb_insert_callchain(&node->rb_root, child,
457 					    CHAIN_GRAPH_ABS);
458 	}
459 }
460 
461 static void
462 sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
463 		     u64 min_hit, struct callchain_param *param __maybe_unused)
464 {
465 	__sort_chain_graph_abs(&chain_root->node, min_hit);
466 	rb_root->rb_node = chain_root->node.rb_root.rb_node;
467 }
468 
469 static void __sort_chain_graph_rel(struct callchain_node *node,
470 				   double min_percent)
471 {
472 	struct rb_node *n;
473 	struct callchain_node *child;
474 	u64 min_hit;
475 
476 	node->rb_root = RB_ROOT;
477 	min_hit = ceil(node->children_hit * min_percent);
478 
479 	n = rb_first(&node->rb_root_in);
480 	while (n) {
481 		child = rb_entry(n, struct callchain_node, rb_node_in);
482 		n = rb_next(n);
483 
484 		__sort_chain_graph_rel(child, min_percent);
485 		if (callchain_cumul_hits(child) >= min_hit)
486 			rb_insert_callchain(&node->rb_root, child,
487 					    CHAIN_GRAPH_REL);
488 	}
489 }
490 
491 static void
492 sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
493 		     u64 min_hit __maybe_unused, struct callchain_param *param)
494 {
495 	__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
496 	rb_root->rb_node = chain_root->node.rb_root.rb_node;
497 }
498 
499 int callchain_register_param(struct callchain_param *param)
500 {
501 	switch (param->mode) {
502 	case CHAIN_GRAPH_ABS:
503 		param->sort = sort_chain_graph_abs;
504 		break;
505 	case CHAIN_GRAPH_REL:
506 		param->sort = sort_chain_graph_rel;
507 		break;
508 	case CHAIN_FLAT:
509 	case CHAIN_FOLDED:
510 		param->sort = sort_chain_flat;
511 		break;
512 	case CHAIN_NONE:
513 	default:
514 		return -1;
515 	}
516 	return 0;
517 }
518 
519 /*
520  * Create a child for a parent. If inherit_children, then the new child
521  * will become the new parent of it's parent children
522  */
523 static struct callchain_node *
524 create_child(struct callchain_node *parent, bool inherit_children)
525 {
526 	struct callchain_node *new;
527 
528 	new = zalloc(sizeof(*new));
529 	if (!new) {
530 		perror("not enough memory to create child for code path tree");
531 		return NULL;
532 	}
533 	new->parent = parent;
534 	INIT_LIST_HEAD(&new->val);
535 	INIT_LIST_HEAD(&new->parent_val);
536 
537 	if (inherit_children) {
538 		struct rb_node *n;
539 		struct callchain_node *child;
540 
541 		new->rb_root_in = parent->rb_root_in;
542 		parent->rb_root_in = RB_ROOT;
543 
544 		n = rb_first(&new->rb_root_in);
545 		while (n) {
546 			child = rb_entry(n, struct callchain_node, rb_node_in);
547 			child->parent = new;
548 			n = rb_next(n);
549 		}
550 
551 		/* make it the first child */
552 		rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
553 		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
554 	}
555 
556 	return new;
557 }
558 
559 
/*
 * Fill the node with callchain values
 *
 * Consumes every remaining cursor entry (current position to the end)
 * into @node's value list, copying ip/symbol/map/srcline and, for
 * branch entries, the per-entry branch statistics. Returns 0 on
 * success, -1 on allocation failure — node->val may then hold a
 * partial list, which add_child() cleans up.
 */
static int
fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
{
	struct callchain_cursor_node *cursor_node;

	node->val_nr = cursor->nr - cursor->pos;
	if (!node->val_nr)
		pr_warning("Warning: empty node in callchain tree\n");

	cursor_node = callchain_cursor_current(cursor);

	while (cursor_node) {
		struct callchain_list *call;

		call = zalloc(sizeof(*call));
		if (!call) {
			perror("not enough memory for the code path tree");
			return -1;
		}
		call->ip = cursor_node->ip;
		call->ms.sym = cursor_node->sym;
		call->ms.map = map__get(cursor_node->map); /* takes a map ref */
		call->srcline = cursor_node->srcline;

		if (cursor_node->branch) {
			call->branch_count = 1;

			if (cursor_node->branch_from) {
				/*
				 * branch_from is set with value somewhere else
				 * to imply it's "to" of a branch.
				 */
				call->brtype_stat.branch_to = true;

				if (cursor_node->branch_flags.predicted)
					call->predicted_count = 1;

				if (cursor_node->branch_flags.abort)
					call->abort_count = 1;

				branch_type_count(&call->brtype_stat,
						  &cursor_node->branch_flags,
						  cursor_node->branch_from,
						  cursor_node->ip);
			} else {
				/*
				 * It's "from" of a branch
				 */
				call->brtype_stat.branch_to = false;
				call->cycles_count =
					cursor_node->branch_flags.cycles;
				call->iter_count = cursor_node->nr_loop_iter;
				call->iter_cycles = cursor_node->iter_cycles;
			}
		}

		list_add_tail(&call->list, &node->val);

		callchain_cursor_advance(cursor);
		cursor_node = callchain_cursor_current(cursor);
	}
	return 0;
}
626 
627 static struct callchain_node *
628 add_child(struct callchain_node *parent,
629 	  struct callchain_cursor *cursor,
630 	  u64 period)
631 {
632 	struct callchain_node *new;
633 
634 	new = create_child(parent, false);
635 	if (new == NULL)
636 		return NULL;
637 
638 	if (fill_node(new, cursor) < 0) {
639 		struct callchain_list *call, *tmp;
640 
641 		list_for_each_entry_safe(call, tmp, &new->val, list) {
642 			list_del_init(&call->list);
643 			map__zput(call->ms.map);
644 			free(call);
645 		}
646 		free(new);
647 		return NULL;
648 	}
649 
650 	new->children_hit = 0;
651 	new->hit = period;
652 	new->children_count = 0;
653 	new->count = 1;
654 	return new;
655 }
656 
/*
 * Result of comparing a cursor entry against a stored callchain entry.
 * LT/GT provide the ordering that keeps sibling rbtrees sorted.
 */
enum match_result {
	MATCH_ERROR  = -1,
	MATCH_EQ,
	MATCH_LT,
	MATCH_GT,
};
663 
664 static enum match_result match_chain_strings(const char *left,
665 					     const char *right)
666 {
667 	enum match_result ret = MATCH_EQ;
668 	int cmp;
669 
670 	if (left && right)
671 		cmp = strcmp(left, right);
672 	else if (!left && right)
673 		cmp = 1;
674 	else if (left && !right)
675 		cmp = -1;
676 	else
677 		return MATCH_ERROR;
678 
679 	if (cmp != 0)
680 		ret = cmp < 0 ? MATCH_LT : MATCH_GT;
681 
682 	return ret;
683 }
684 
685 /*
686  * We need to always use relative addresses because we're aggregating
687  * callchains from multiple threads, i.e. different address spaces, so
688  * comparing absolute addresses make no sense as a symbol in a DSO may end up
689  * in a different address when used in a different binary or even the same
690  * binary but with some sort of address randomization technique, thus we need
691  * to compare just relative addresses. -acme
692  */
693 static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
694 						   struct map *right_map, u64 right_ip)
695 {
696 	struct dso *left_dso = left_map ? left_map->dso : NULL;
697 	struct dso *right_dso = right_map ? right_map->dso : NULL;
698 
699 	if (left_dso != right_dso)
700 		return left_dso < right_dso ? MATCH_LT : MATCH_GT;
701 
702 	if (left_ip != right_ip)
703  		return left_ip < right_ip ? MATCH_LT : MATCH_GT;
704 
705 	return MATCH_EQ;
706 }
707 
/*
 * Compare a cursor entry against a stored callchain entry using the
 * configured key (srcline / function / address), each falling back to
 * the next when information is missing. On an exact match of a branch
 * entry, also accumulate the entry's branch statistics into @cnode.
 */
static enum match_result match_chain(struct callchain_cursor_node *node,
				     struct callchain_list *cnode)
{
	enum match_result match = MATCH_ERROR;

	switch (callchain_param.key) {
	case CCKEY_SRCLINE:
		match = match_chain_strings(cnode->srcline, node->srcline);
		if (match != MATCH_ERROR)
			break;
		/* otherwise fall-back to symbol-based comparison below */
		__fallthrough;
	case CCKEY_FUNCTION:
		if (node->sym && cnode->ms.sym) {
			/*
			 * Compare inlined frames based on their symbol name
			 * because different inlined frames will have the same
			 * symbol start. Otherwise do a faster comparison based
			 * on the symbol start address.
			 */
			if (cnode->ms.sym->inlined || node->sym->inlined) {
				match = match_chain_strings(cnode->ms.sym->name,
							    node->sym->name);
				if (match != MATCH_ERROR)
					break;
			} else {
				match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
								  node->map, node->sym->start);
				break;
			}
		}
		/* otherwise fall-back to IP-based comparison below */
		__fallthrough;
	case CCKEY_ADDRESS:
	default:
		match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
		break;
	}

	/* On a hit, fold this entry's branch stats into the stored node. */
	if (match == MATCH_EQ && node->branch) {
		cnode->branch_count++;

		if (node->branch_from) {
			/*
			 * It's "to" of a branch
			 */
			cnode->brtype_stat.branch_to = true;

			if (node->branch_flags.predicted)
				cnode->predicted_count++;

			if (node->branch_flags.abort)
				cnode->abort_count++;

			branch_type_count(&cnode->brtype_stat,
					  &node->branch_flags,
					  node->branch_from,
					  node->ip);
		} else {
			/*
			 * It's "from" of a branch
			 */
			cnode->brtype_stat.branch_to = false;
			cnode->cycles_count += node->branch_flags.cycles;
			cnode->iter_count += node->nr_loop_iter;
			cnode->iter_cycles += node->iter_cycles;
			cnode->from_count++;
		}
	}

	return match;
}
780 
/*
 * Split the parent in two parts (a new child is created) and
 * give a part of its callchain to the created child.
 * Then create another child to host the given callchain of new branch
 *
 * @to_split: first entry of parent's value list to move to the child
 * @idx_parents/@idx_local: entries matched before/inside this node;
 * their sum is the cursor depth already consumed.
 */
static int
split_add_child(struct callchain_node *parent,
		struct callchain_cursor *cursor,
		struct callchain_list *to_split,
		u64 idx_parents, u64 idx_local, u64 period)
{
	struct callchain_node *new;
	struct list_head *old_tail;
	/* NOTE(review): idx_total is unsigned int while the indices are
	 * u64 — truncation seems implausible for real chain depths, but
	 * confirm. */
	unsigned int idx_total = idx_parents + idx_local;

	/* split */
	new = create_child(parent, true);
	if (new == NULL)
		return -1;

	/* split the callchain and move a part to the new child */
	old_tail = parent->val.prev;
	list_del_range(&to_split->list, old_tail);
	new->val.next = &to_split->list;
	new->val.prev = old_tail;
	to_split->list.prev = &new->val;
	old_tail->next = &new->val;

	/* split the hits */
	new->hit = parent->hit;
	new->children_hit = parent->children_hit;
	parent->children_hit = callchain_cumul_hits(new);
	new->val_nr = parent->val_nr - idx_local;
	parent->val_nr = idx_local;
	new->count = parent->count;
	new->children_count = parent->children_count;
	parent->children_count = callchain_cumul_counts(new);

	/* create a new child for the new branch if any */
	if (idx_total < cursor->nr) {
		struct callchain_node *first;
		struct callchain_list *cnode;
		struct callchain_cursor_node *node;
		struct rb_node *p, **pp;

		/* The parent becomes a pure interior node. */
		parent->hit = 0;
		parent->children_hit += period;
		parent->count = 0;
		parent->children_count += 1;

		node = callchain_cursor_current(cursor);
		new = add_child(parent, cursor, period);
		if (new == NULL)
			return -1;

		/*
		 * This is second child since we moved parent's children
		 * to new (first) child above.
		 */
		p = parent->rb_root_in.rb_node;
		first = rb_entry(p, struct callchain_node, rb_node_in);
		cnode = list_first_entry(&first->val, struct callchain_list,
					 list);

		if (match_chain(node, cnode) == MATCH_LT)
			pp = &p->rb_left;
		else
			pp = &p->rb_right;

		rb_link_node(&new->rb_node_in, p, pp);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	} else {
		/* The whole chain ended here: parent keeps the hits. */
		parent->hit = period;
		parent->count = 1;
	}
	return 0;
}
858 
859 static enum match_result
860 append_chain(struct callchain_node *root,
861 	     struct callchain_cursor *cursor,
862 	     u64 period);
863 
/*
 * Append the chain at @cursor below @root: either it extends an
 * existing child whose leading entries match, or a brand new child is
 * inserted into root's rbtree of children, kept sorted by first entry.
 * Returns 0 on success, -1 on error.
 */
static int
append_chain_children(struct callchain_node *root,
		      struct callchain_cursor *cursor,
		      u64 period)
{
	struct callchain_node *rnode;
	struct callchain_cursor_node *node;
	struct rb_node **p = &root->rb_root_in.rb_node;
	struct rb_node *parent = NULL;

	node = callchain_cursor_current(cursor);
	if (!node)
		return -1;

	/* lookup in childrens */
	while (*p) {
		enum match_result ret;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node_in);

		/* If at least first entry matches, rely to children */
		ret = append_chain(rnode, cursor, period);
		if (ret == MATCH_EQ)
			goto inc_children_hit;
		if (ret == MATCH_ERROR)
			return -1;

		/* No match: descend on the side the comparison chose. */
		if (ret == MATCH_LT)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	/* nothing in children, add to the current node */
	rnode = add_child(root, cursor, period);
	if (rnode == NULL)
		return -1;

	rb_link_node(&rnode->rb_node_in, parent, p);
	rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);

inc_children_hit:
	root->children_hit += period;
	root->children_count++;
	return 0;
}
910 
/*
 * Try to match the cursor's chain against @root's own value list.
 * Full match of the node plus exhausted cursor: bump root's hits.
 * Partial match: split the node. Full node match with entries left:
 * recurse into the children. No match at all: return LT/GT so the
 * caller can keep searching siblings.
 */
static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period)
{
	struct callchain_list *cnode;
	u64 start = cursor->pos;
	bool found = false;
	u64 matches;
	enum match_result cmp = MATCH_ERROR;

	/*
	 * Lookup in the current node
	 * If we have a symbol, then compare the start to match
	 * anywhere inside a function, unless function
	 * mode is disabled.
	 */
	list_for_each_entry(cnode, &root->val, list) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (!node)
			break;

		cmp = match_chain(node, cnode);
		if (cmp != MATCH_EQ)
			break;

		found = true;

		callchain_cursor_advance(cursor);
	}

	/* no entry matched: relay the decision to the parent */
	if (!found) {
		WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
		return cmp;
	}

	matches = cursor->pos - start;

	/* we match only a part of the node. Split it and add the new chain */
	if (matches < root->val_nr) {
		if (split_add_child(root, cursor, cnode, start, matches,
				    period) < 0)
			return MATCH_ERROR;

		return MATCH_EQ;
	}

	/* we match 100% of the path, increment the hit */
	if (matches == root->val_nr && cursor->pos == cursor->nr) {
		root->hit += period;
		root->count++;
		return MATCH_EQ;
	}

	/* We match the node and still have a part remaining */
	if (append_chain_children(root, cursor, period) < 0)
		return MATCH_ERROR;

	return MATCH_EQ;
}
974 
975 int callchain_append(struct callchain_root *root,
976 		     struct callchain_cursor *cursor,
977 		     u64 period)
978 {
979 	if (!cursor->nr)
980 		return 0;
981 
982 	callchain_cursor_commit(cursor);
983 
984 	if (append_chain_children(&root->node, cursor, period) < 0)
985 		return -1;
986 
987 	if (cursor->nr > root->max_depth)
988 		root->max_depth = cursor->nr;
989 
990 	return 0;
991 }
992 
/*
 * Merge @src's whole subtree into @dst, replaying src's value entries
 * through the cursor and re-appending each chain that carried self
 * hits. src's value lists and child nodes are freed as they are
 * consumed; src itself is freed by the caller.
 */
static int
merge_chain_branch(struct callchain_cursor *cursor,
		   struct callchain_node *dst, struct callchain_node *src)
{
	struct callchain_cursor_node **old_last = cursor->last;
	struct callchain_node *child;
	struct callchain_list *list, *next_list;
	struct rb_node *n;
	int old_pos = cursor->nr;
	int err = 0;

	/* Push src's own entries onto the cursor, freeing them as we go. */
	list_for_each_entry_safe(list, next_list, &src->val, list) {
		callchain_cursor_append(cursor, list->ip,
					list->ms.map, list->ms.sym,
					false, NULL, 0, 0, 0, list->srcline);
		list_del_init(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	/* Re-insert the accumulated chain into dst if src had self hits. */
	if (src->hit) {
		callchain_cursor_commit(cursor);
		/* NOTE(review): this early return skips the cursor
		 * restore below — confirm callers tolerate that. */
		if (append_chain_children(dst, cursor, src->hit) < 0)
			return -1;
	}

	/* Recurse into src's children; each is detached, merged, freed. */
	n = rb_first(&src->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);
		n = rb_next(n);
		rb_erase(&child->rb_node_in, &src->rb_root_in);

		/* NOTE(review): on error 'child' is not freed — looks
		 * like a leak on this path; confirm intent. */
		err = merge_chain_branch(cursor, dst, child);
		if (err)
			break;

		free(child);
	}

	/* Restore the cursor to where the caller left it. */
	cursor->nr = old_pos;
	cursor->last = old_last;

	return err;
}
1037 
1038 int callchain_merge(struct callchain_cursor *cursor,
1039 		    struct callchain_root *dst, struct callchain_root *src)
1040 {
1041 	return merge_chain_branch(cursor, &dst->node, &src->node);
1042 }
1043 
1044 int callchain_cursor_append(struct callchain_cursor *cursor,
1045 			    u64 ip, struct map *map, struct symbol *sym,
1046 			    bool branch, struct branch_flags *flags,
1047 			    int nr_loop_iter, u64 iter_cycles, u64 branch_from,
1048 			    const char *srcline)
1049 {
1050 	struct callchain_cursor_node *node = *cursor->last;
1051 
1052 	if (!node) {
1053 		node = calloc(1, sizeof(*node));
1054 		if (!node)
1055 			return -ENOMEM;
1056 
1057 		*cursor->last = node;
1058 	}
1059 
1060 	node->ip = ip;
1061 	map__zput(node->map);
1062 	node->map = map__get(map);
1063 	node->sym = sym;
1064 	node->branch = branch;
1065 	node->nr_loop_iter = nr_loop_iter;
1066 	node->iter_cycles = iter_cycles;
1067 	node->srcline = srcline;
1068 
1069 	if (flags)
1070 		memcpy(&node->branch_flags, flags,
1071 			sizeof(struct branch_flags));
1072 
1073 	node->branch_from = branch_from;
1074 	cursor->nr++;
1075 
1076 	cursor->last = &node->next;
1077 
1078 	return 0;
1079 }
1080 
1081 int sample__resolve_callchain(struct perf_sample *sample,
1082 			      struct callchain_cursor *cursor, struct symbol **parent,
1083 			      struct evsel *evsel, struct addr_location *al,
1084 			      int max_stack)
1085 {
1086 	if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
1087 		return 0;
1088 
1089 	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
1090 	    perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
1091 		return thread__resolve_callchain(al->thread, cursor, evsel, sample,
1092 						 parent, al, max_stack);
1093 	}
1094 	return 0;
1095 }
1096 
1097 int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
1098 {
1099 	if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
1100 		!symbol_conf.show_branchflag_count)
1101 		return 0;
1102 	return callchain_append(he->callchain, &callchain_cursor, sample->period);
1103 }
1104 
1105 int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
1106 			bool hide_unresolved)
1107 {
1108 	al->map = node->map;
1109 	al->sym = node->sym;
1110 	al->srcline = node->srcline;
1111 	al->addr = node->ip;
1112 
1113 	if (al->sym == NULL) {
1114 		if (hide_unresolved)
1115 			return 0;
1116 		if (al->map == NULL)
1117 			goto out;
1118 	}
1119 
1120 	if (al->map->groups == &al->machine->kmaps) {
1121 		if (machine__is_host(al->machine)) {
1122 			al->cpumode = PERF_RECORD_MISC_KERNEL;
1123 			al->level = 'k';
1124 		} else {
1125 			al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
1126 			al->level = 'g';
1127 		}
1128 	} else {
1129 		if (machine__is_host(al->machine)) {
1130 			al->cpumode = PERF_RECORD_MISC_USER;
1131 			al->level = '.';
1132 		} else if (perf_guest) {
1133 			al->cpumode = PERF_RECORD_MISC_GUEST_USER;
1134 			al->level = 'u';
1135 		} else {
1136 			al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
1137 			al->level = 'H';
1138 		}
1139 	}
1140 
1141 out:
1142 	return 1;
1143 }
1144 
1145 char *callchain_list__sym_name(struct callchain_list *cl,
1146 			       char *bf, size_t bfsize, bool show_dso)
1147 {
1148 	bool show_addr = callchain_param.key == CCKEY_ADDRESS;
1149 	bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
1150 	int printed;
1151 
1152 	if (cl->ms.sym) {
1153 		const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
1154 
1155 		if (show_srcline && cl->srcline)
1156 			printed = scnprintf(bf, bfsize, "%s %s%s",
1157 					    cl->ms.sym->name, cl->srcline,
1158 					    inlined);
1159 		else
1160 			printed = scnprintf(bf, bfsize, "%s%s",
1161 					    cl->ms.sym->name, inlined);
1162 	} else
1163 		printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
1164 
1165 	if (show_dso)
1166 		scnprintf(bf + printed, bfsize - printed, " %s",
1167 			  cl->ms.map ?
1168 			  cl->ms.map->dso->short_name :
1169 			  "unknown");
1170 
1171 	return bf;
1172 }
1173 
1174 char *callchain_node__scnprintf_value(struct callchain_node *node,
1175 				      char *bf, size_t bfsize, u64 total)
1176 {
1177 	double percent = 0.0;
1178 	u64 period = callchain_cumul_hits(node);
1179 	unsigned count = callchain_cumul_counts(node);
1180 
1181 	if (callchain_param.mode == CHAIN_FOLDED) {
1182 		period = node->hit;
1183 		count = node->count;
1184 	}
1185 
1186 	switch (callchain_param.value) {
1187 	case CCVAL_PERIOD:
1188 		scnprintf(bf, bfsize, "%"PRIu64, period);
1189 		break;
1190 	case CCVAL_COUNT:
1191 		scnprintf(bf, bfsize, "%u", count);
1192 		break;
1193 	case CCVAL_PERCENT:
1194 	default:
1195 		if (total)
1196 			percent = period * 100.0 / total;
1197 		scnprintf(bf, bfsize, "%.2f%%", percent);
1198 		break;
1199 	}
1200 	return bf;
1201 }
1202 
1203 int callchain_node__fprintf_value(struct callchain_node *node,
1204 				 FILE *fp, u64 total)
1205 {
1206 	double percent = 0.0;
1207 	u64 period = callchain_cumul_hits(node);
1208 	unsigned count = callchain_cumul_counts(node);
1209 
1210 	if (callchain_param.mode == CHAIN_FOLDED) {
1211 		period = node->hit;
1212 		count = node->count;
1213 	}
1214 
1215 	switch (callchain_param.value) {
1216 	case CCVAL_PERIOD:
1217 		return fprintf(fp, "%"PRIu64, period);
1218 	case CCVAL_COUNT:
1219 		return fprintf(fp, "%u", count);
1220 	case CCVAL_PERCENT:
1221 	default:
1222 		if (total)
1223 			percent = period * 100.0 / total;
1224 		return percent_color_fprintf(fp, "%.2f%%", percent);
1225 	}
1226 	return 0;
1227 }
1228 
1229 static void callchain_counts_value(struct callchain_node *node,
1230 				   u64 *branch_count, u64 *predicted_count,
1231 				   u64 *abort_count, u64 *cycles_count)
1232 {
1233 	struct callchain_list *clist;
1234 
1235 	list_for_each_entry(clist, &node->val, list) {
1236 		if (branch_count)
1237 			*branch_count += clist->branch_count;
1238 
1239 		if (predicted_count)
1240 			*predicted_count += clist->predicted_count;
1241 
1242 		if (abort_count)
1243 			*abort_count += clist->abort_count;
1244 
1245 		if (cycles_count)
1246 			*cycles_count += clist->cycles_count;
1247 	}
1248 }
1249 
1250 static int callchain_node_branch_counts_cumul(struct callchain_node *node,
1251 					      u64 *branch_count,
1252 					      u64 *predicted_count,
1253 					      u64 *abort_count,
1254 					      u64 *cycles_count)
1255 {
1256 	struct callchain_node *child;
1257 	struct rb_node *n;
1258 
1259 	n = rb_first(&node->rb_root_in);
1260 	while (n) {
1261 		child = rb_entry(n, struct callchain_node, rb_node_in);
1262 		n = rb_next(n);
1263 
1264 		callchain_node_branch_counts_cumul(child, branch_count,
1265 						   predicted_count,
1266 						   abort_count,
1267 						   cycles_count);
1268 
1269 		callchain_counts_value(child, branch_count,
1270 				       predicted_count, abort_count,
1271 				       cycles_count);
1272 	}
1273 
1274 	return 0;
1275 }
1276 
1277 int callchain_branch_counts(struct callchain_root *root,
1278 			    u64 *branch_count, u64 *predicted_count,
1279 			    u64 *abort_count, u64 *cycles_count)
1280 {
1281 	if (branch_count)
1282 		*branch_count = 0;
1283 
1284 	if (predicted_count)
1285 		*predicted_count = 0;
1286 
1287 	if (abort_count)
1288 		*abort_count = 0;
1289 
1290 	if (cycles_count)
1291 		*cycles_count = 0;
1292 
1293 	return callchain_node_branch_counts_cumul(&root->node,
1294 						  branch_count,
1295 						  predicted_count,
1296 						  abort_count,
1297 						  cycles_count);
1298 }
1299 
1300 static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
1301 {
1302 	int printed;
1303 
1304 	printed = scnprintf(bf, bfsize, "%s%s:%" PRId64 "", (idx) ? " " : " (", str, value);
1305 
1306 	return printed;
1307 }
1308 
/*
 * Append "str:value%" to @bf, in the same " (" group convention as
 * count_pri64_printf().  Values below a non-zero @threshold are
 * suppressed (returns 0, nothing written).
 */
static int count_float_printf(int idx, const char *str, float value,
			      char *bf, int bfsize, float threshold)
{
	if (threshold != 0.0 && value < threshold)
		return 0;

	return scnprintf(bf, bfsize, "%s%s:%.1f%%", idx ? " " : " (", str, value);
}
1321 
1322 static int branch_to_str(char *bf, int bfsize,
1323 			 u64 branch_count, u64 predicted_count,
1324 			 u64 abort_count,
1325 			 struct branch_type_stat *brtype_stat)
1326 {
1327 	int printed, i = 0;
1328 
1329 	printed = branch_type_str(brtype_stat, bf, bfsize);
1330 	if (printed)
1331 		i++;
1332 
1333 	if (predicted_count < branch_count) {
1334 		printed += count_float_printf(i++, "predicted",
1335 				predicted_count * 100.0 / branch_count,
1336 				bf + printed, bfsize - printed, 0.0);
1337 	}
1338 
1339 	if (abort_count) {
1340 		printed += count_float_printf(i++, "abort",
1341 				abort_count * 100.0 / branch_count,
1342 				bf + printed, bfsize - printed, 0.1);
1343 	}
1344 
1345 	if (i)
1346 		printed += scnprintf(bf + printed, bfsize - printed, ")");
1347 
1348 	return printed;
1349 }
1350 
1351 static int branch_from_str(char *bf, int bfsize,
1352 			   u64 branch_count,
1353 			   u64 cycles_count, u64 iter_count,
1354 			   u64 iter_cycles, u64 from_count)
1355 {
1356 	int printed = 0, i = 0;
1357 	u64 cycles, v = 0;
1358 
1359 	cycles = cycles_count / branch_count;
1360 	if (cycles) {
1361 		printed += count_pri64_printf(i++, "cycles",
1362 				cycles,
1363 				bf + printed, bfsize - printed);
1364 	}
1365 
1366 	if (iter_count && from_count) {
1367 		v = iter_count / from_count;
1368 		if (v) {
1369 			printed += count_pri64_printf(i++, "iter",
1370 					v, bf + printed, bfsize - printed);
1371 
1372 			printed += count_pri64_printf(i++, "avg_cycles",
1373 					iter_cycles / iter_count,
1374 					bf + printed, bfsize - printed);
1375 		}
1376 	}
1377 
1378 	if (i)
1379 		printed += scnprintf(bf + printed, bfsize - printed, ")");
1380 
1381 	return printed;
1382 }
1383 
1384 static int counts_str_build(char *bf, int bfsize,
1385 			     u64 branch_count, u64 predicted_count,
1386 			     u64 abort_count, u64 cycles_count,
1387 			     u64 iter_count, u64 iter_cycles,
1388 			     u64 from_count,
1389 			     struct branch_type_stat *brtype_stat)
1390 {
1391 	int printed;
1392 
1393 	if (branch_count == 0)
1394 		return scnprintf(bf, bfsize, " (calltrace)");
1395 
1396 	if (brtype_stat->branch_to) {
1397 		printed = branch_to_str(bf, bfsize, branch_count,
1398 				predicted_count, abort_count, brtype_stat);
1399 	} else {
1400 		printed = branch_from_str(bf, bfsize, branch_count,
1401 				cycles_count, iter_count, iter_cycles,
1402 				from_count);
1403 	}
1404 
1405 	if (!printed)
1406 		bf[0] = 0;
1407 
1408 	return printed;
1409 }
1410 
1411 static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1412 				   u64 branch_count, u64 predicted_count,
1413 				   u64 abort_count, u64 cycles_count,
1414 				   u64 iter_count, u64 iter_cycles,
1415 				   u64 from_count,
1416 				   struct branch_type_stat *brtype_stat)
1417 {
1418 	char str[256];
1419 
1420 	counts_str_build(str, sizeof(str), branch_count,
1421 			 predicted_count, abort_count, cycles_count,
1422 			 iter_count, iter_cycles, from_count, brtype_stat);
1423 
1424 	if (fp)
1425 		return fprintf(fp, "%s", str);
1426 
1427 	return scnprintf(bf, bfsize, "%s", str);
1428 }
1429 
1430 int callchain_list_counts__printf_value(struct callchain_list *clist,
1431 					FILE *fp, char *bf, int bfsize)
1432 {
1433 	u64 branch_count, predicted_count;
1434 	u64 abort_count, cycles_count;
1435 	u64 iter_count, iter_cycles;
1436 	u64 from_count;
1437 
1438 	branch_count = clist->branch_count;
1439 	predicted_count = clist->predicted_count;
1440 	abort_count = clist->abort_count;
1441 	cycles_count = clist->cycles_count;
1442 	iter_count = clist->iter_count;
1443 	iter_cycles = clist->iter_cycles;
1444 	from_count = clist->from_count;
1445 
1446 	return callchain_counts_printf(fp, bf, bfsize, branch_count,
1447 				       predicted_count, abort_count,
1448 				       cycles_count, iter_count, iter_cycles,
1449 				       from_count, &clist->brtype_stat);
1450 }
1451 
1452 static void free_callchain_node(struct callchain_node *node)
1453 {
1454 	struct callchain_list *list, *tmp;
1455 	struct callchain_node *child;
1456 	struct rb_node *n;
1457 
1458 	list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
1459 		list_del_init(&list->list);
1460 		map__zput(list->ms.map);
1461 		free(list);
1462 	}
1463 
1464 	list_for_each_entry_safe(list, tmp, &node->val, list) {
1465 		list_del_init(&list->list);
1466 		map__zput(list->ms.map);
1467 		free(list);
1468 	}
1469 
1470 	n = rb_first(&node->rb_root_in);
1471 	while (n) {
1472 		child = container_of(n, struct callchain_node, rb_node_in);
1473 		n = rb_next(n);
1474 		rb_erase(&child->rb_node_in, &node->rb_root_in);
1475 
1476 		free_callchain_node(child);
1477 		free(child);
1478 	}
1479 }
1480 
1481 void free_callchain(struct callchain_root *root)
1482 {
1483 	if (!symbol_conf.use_callchain)
1484 		return;
1485 
1486 	free_callchain_node(&root->node);
1487 }
1488 
1489 static u64 decay_callchain_node(struct callchain_node *node)
1490 {
1491 	struct callchain_node *child;
1492 	struct rb_node *n;
1493 	u64 child_hits = 0;
1494 
1495 	n = rb_first(&node->rb_root_in);
1496 	while (n) {
1497 		child = container_of(n, struct callchain_node, rb_node_in);
1498 
1499 		child_hits += decay_callchain_node(child);
1500 		n = rb_next(n);
1501 	}
1502 
1503 	node->hit = (node->hit * 7) / 8;
1504 	node->children_hit = child_hits;
1505 
1506 	return node->hit;
1507 }
1508 
1509 void decay_callchain(struct callchain_root *root)
1510 {
1511 	if (!symbol_conf.use_callchain)
1512 		return;
1513 
1514 	decay_callchain_node(&root->node);
1515 }
1516 
/*
 * Flatten the ancestry of @node into node->parent_val: walk each parent
 * up to the root, copying every callchain_list entry, so the node can be
 * shown with its full call path.  Returns 0 on success or -ENOMEM if a
 * copy fails, in which case nothing is published to the node.
 */
int callchain_node__make_parent_list(struct callchain_node *node)
{
	struct callchain_node *parent = node->parent;
	struct callchain_list *chain, *new;
	LIST_HEAD(head);

	/*
	 * Build the copies on a temporary list first so a mid-way
	 * allocation failure can be unwound without touching the node.
	 */
	while (parent) {
		list_for_each_entry_reverse(chain, &parent->val, list) {
			new = malloc(sizeof(*new));
			if (new == NULL)
				goto out;
			*new = *chain;
			new->has_children = false;
			map__get(new->ms.map);	/* the copy holds its own map ref */
			list_add_tail(&new->list, &head);
		}
		parent = parent->parent;
	}

	/* Move the copies over, reversing again into root-first order. */
	list_for_each_entry_safe_reverse(chain, new, &head, list)
		list_move_tail(&chain->list, &node->parent_val);

	if (!list_empty(&node->parent_val)) {
		/* The topmost entry expands iff the node has rb-tree siblings. */
		chain = list_first_entry(&node->parent_val, struct callchain_list, list);
		chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);

		chain = list_first_entry(&node->val, struct callchain_list, list);
		chain->has_children = false;
	}
	return 0;

out:
	/* Allocation failed: free everything copied so far. */
	list_for_each_entry_safe(chain, new, &head, list) {
		list_del_init(&chain->list);
		map__zput(chain->ms.map);
		free(chain);
	}
	return -ENOMEM;
}
1556 
1557 int callchain_cursor__copy(struct callchain_cursor *dst,
1558 			   struct callchain_cursor *src)
1559 {
1560 	int rc = 0;
1561 
1562 	callchain_cursor_reset(dst);
1563 	callchain_cursor_commit(src);
1564 
1565 	while (true) {
1566 		struct callchain_cursor_node *node;
1567 
1568 		node = callchain_cursor_current(src);
1569 		if (node == NULL)
1570 			break;
1571 
1572 		rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
1573 					     node->branch, &node->branch_flags,
1574 					     node->nr_loop_iter,
1575 					     node->iter_cycles,
1576 					     node->branch_from, node->srcline);
1577 		if (rc)
1578 			break;
1579 
1580 		callchain_cursor_advance(src);
1581 	}
1582 
1583 	return rc;
1584 }
1585 
1586 /*
1587  * Initialize a cursor before adding entries inside, but keep
1588  * the previously allocated entries as a cache.
1589  */
1590 void callchain_cursor_reset(struct callchain_cursor *cursor)
1591 {
1592 	struct callchain_cursor_node *node;
1593 
1594 	cursor->nr = 0;
1595 	cursor->last = &cursor->first;
1596 
1597 	for (node = cursor->first; node != NULL; node = node->next)
1598 		map__zput(node->map);
1599 }
1600