xref: /linux/tools/perf/util/callchain.c (revision c1a604dff486399ae0be95e6396e0158df95ad5d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
4  *
5  * Handle the callchains from the stream in an ad-hoc radix tree and then
6  * sort them in an rbtree.
7  *
8  * Using a radix for code path provides a fast retrieval and factorizes
9  * memory use. Also that lets us use the paths in a hierarchical graph view.
10  *
11  */
12 
13 #include <inttypes.h>
14 #include <stdlib.h>
15 #include <stdio.h>
16 #include <stdbool.h>
17 #include <errno.h>
18 #include <math.h>
19 #include <linux/zalloc.h>
20 
21 #include "asm/bug.h"
22 
23 #include "debug.h"
24 #include "hist.h"
25 #include "sort.h"
26 #include "machine.h"
27 #include "map.h"
28 #include "callchain.h"
29 #include "branch.h"
30 #include "symbol.h"
31 #include "../perf.h"
32 
/*
 * Defaults shared by callchain_param and callchain_param_default:
 * absolute graph mode, 0.5 minimum percent, callee order, keyed by
 * function, values displayed as percentages.
 */
#define CALLCHAIN_PARAM_DEFAULT			\
	.mode		= CHAIN_GRAPH_ABS,	\
	.min_percent	= 0.5,			\
	.order		= ORDER_CALLEE,		\
	.key		= CCKEY_FUNCTION,	\
	.value		= CCVAL_PERCENT,	\

/* Global callchain parameters, adjusted by the option parsers below. */
struct callchain_param callchain_param = {
	CALLCHAIN_PARAM_DEFAULT
};
43 
44 /*
 * Are there any events using DWARF callchains?
46  *
47  * I.e.
48  *
49  * -e cycles/call-graph=dwarf/
50  */
/* Set by parse_callchain_record() when any event records DWARF callchains. */
bool dwarf_callchain_users;

/* Same defaults as callchain_param, kept unmodified by the parsers. */
struct callchain_param callchain_param_default = {
	CALLCHAIN_PARAM_DEFAULT
};

/* Per-thread cursor used while resolving and appending callchains. */
__thread struct callchain_cursor callchain_cursor;
58 
/*
 * Parse a --call-graph record option string (e.g. "fp", "dwarf,8192",
 * "lbr") into @param.  Thin wrapper around parse_callchain_record().
 */
int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
{
	return parse_callchain_record(arg, param);
}
63 
64 static int parse_callchain_mode(const char *value)
65 {
66 	if (!strncmp(value, "graph", strlen(value))) {
67 		callchain_param.mode = CHAIN_GRAPH_ABS;
68 		return 0;
69 	}
70 	if (!strncmp(value, "flat", strlen(value))) {
71 		callchain_param.mode = CHAIN_FLAT;
72 		return 0;
73 	}
74 	if (!strncmp(value, "fractal", strlen(value))) {
75 		callchain_param.mode = CHAIN_GRAPH_REL;
76 		return 0;
77 	}
78 	if (!strncmp(value, "folded", strlen(value))) {
79 		callchain_param.mode = CHAIN_FOLDED;
80 		return 0;
81 	}
82 	return -1;
83 }
84 
85 static int parse_callchain_order(const char *value)
86 {
87 	if (!strncmp(value, "caller", strlen(value))) {
88 		callchain_param.order = ORDER_CALLER;
89 		callchain_param.order_set = true;
90 		return 0;
91 	}
92 	if (!strncmp(value, "callee", strlen(value))) {
93 		callchain_param.order = ORDER_CALLEE;
94 		callchain_param.order_set = true;
95 		return 0;
96 	}
97 	return -1;
98 }
99 
100 static int parse_callchain_sort_key(const char *value)
101 {
102 	if (!strncmp(value, "function", strlen(value))) {
103 		callchain_param.key = CCKEY_FUNCTION;
104 		return 0;
105 	}
106 	if (!strncmp(value, "address", strlen(value))) {
107 		callchain_param.key = CCKEY_ADDRESS;
108 		return 0;
109 	}
110 	if (!strncmp(value, "srcline", strlen(value))) {
111 		callchain_param.key = CCKEY_SRCLINE;
112 		return 0;
113 	}
114 	if (!strncmp(value, "branch", strlen(value))) {
115 		callchain_param.branch_callstack = 1;
116 		return 0;
117 	}
118 	return -1;
119 }
120 
121 static int parse_callchain_value(const char *value)
122 {
123 	if (!strncmp(value, "percent", strlen(value))) {
124 		callchain_param.value = CCVAL_PERCENT;
125 		return 0;
126 	}
127 	if (!strncmp(value, "period", strlen(value))) {
128 		callchain_param.value = CCVAL_PERIOD;
129 		return 0;
130 	}
131 	if (!strncmp(value, "count", strlen(value))) {
132 		callchain_param.value = CCVAL_COUNT;
133 		return 0;
134 	}
135 	return -1;
136 }
137 
138 static int get_stack_size(const char *str, unsigned long *_size)
139 {
140 	char *endptr;
141 	unsigned long size;
142 	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
143 
144 	size = strtoul(str, &endptr, 0);
145 
146 	do {
147 		if (*endptr)
148 			break;
149 
150 		size = round_up(size, sizeof(u64));
151 		if (!size || size > max_size)
152 			break;
153 
154 		*_size = size;
155 		return 0;
156 
157 	} while (0);
158 
159 	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
160 	       max_size, str);
161 	return -1;
162 }
163 
/*
 * Parse a --call-graph report option string: a comma separated list
 * that may contain a print mode, an order, a sort key, a value type,
 * a minimum percent and a print limit, in any order.  When
 * @allow_record_opt is set (perf top), a record option ("fp"/"dwarf"/
 * "lbr") is also accepted, and a number following "dwarf" is taken as
 * the stack dump size.  Returns 0 on success, -1 on a bad option.
 */
static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{
	char *tok;
	char *endptr, *saveptr = NULL;
	bool minpcnt_set = false;
	bool record_opt_set = false;
	bool try_stack_size = false;

	callchain_param.enabled = true;
	symbol_conf.use_callchain = true;

	/* No argument: just enable callchains with the current defaults. */
	if (!arg)
		return 0;

	while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
		/* "none" (or any prefix of it) disables callchains entirely. */
		if (!strncmp(tok, "none", strlen(tok))) {
			callchain_param.mode = CHAIN_NONE;
			callchain_param.enabled = false;
			symbol_conf.use_callchain = false;
			return 0;
		}

		if (!parse_callchain_mode(tok) ||
		    !parse_callchain_order(tok) ||
		    !parse_callchain_sort_key(tok) ||
		    !parse_callchain_value(tok)) {
			/* parsing ok - move on to the next */
			try_stack_size = false;
			goto next;
		} else if (allow_record_opt && !record_opt_set) {
			if (parse_callchain_record(tok, &callchain_param))
				goto try_numbers;

			/* assume that number followed by 'dwarf' is stack size */
			if (callchain_param.record_mode == CALLCHAIN_DWARF)
				try_stack_size = true;

			record_opt_set = true;
			goto next;
		}

try_numbers:
		if (try_stack_size) {
			unsigned long size = 0;

			if (get_stack_size(tok, &size) < 0)
				return -1;
			callchain_param.dump_size = size;
			try_stack_size = false;
		} else if (!minpcnt_set) {
			/* try to get the min percent */
			callchain_param.min_percent = strtod(tok, &endptr);
			if (tok == endptr)
				return -1;
			minpcnt_set = true;
		} else {
			/* try print limit at last */
			callchain_param.print_limit = strtoul(tok, &endptr, 0);
			if (tok == endptr)
				return -1;
		}
next:
		/* NULL arg makes strtok_r() continue from saveptr. */
		arg = NULL;
	}

	if (callchain_register_param(&callchain_param) < 0) {
		pr_err("Can't register callchain params\n");
		return -1;
	}
	return 0;
}
236 
/* perf report: parse --call-graph without allowing record options. */
int parse_callchain_report_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, false);
}
241 
/* perf top: parse --call-graph, additionally allowing record options. */
int parse_callchain_top_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, true);
}
246 
/*
 * Parse a --call-graph record option: "fp", "lbr", or "dwarf" with an
 * optional ",<size>" stack dump size.  Fills @param->record_mode (and
 * @param->dump_size for dwarf).  Returns 0 on success, -ENOMEM on
 * allocation failure, -1 on a malformed option.
 */
int parse_callchain_record(const char *arg, struct callchain_param *param)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/*
		 * Framepointer style.  Note the strncmp() bound of
		 * sizeof("name") includes the NUL, so these behave as
		 * exact (strcmp-like) matches, not prefix matches.
		 */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				param->record_mode = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for --call-graph fp\n");
			break;

		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			param->record_mode = CALLCHAIN_DWARF;
			param->dump_size = default_stack_dump_size;
			dwarf_callchain_users = true;

			/* An optional second token overrides the dump size. */
			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				param->dump_size = size;
			}
		} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				param->record_mode = CALLCHAIN_LBR;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
					"needed for --call-graph lbr\n");
			break;
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}
309 
/*
 * Handle "call-graph.*" keys from perfconfig, dispatching each key to
 * the matching parser above.  Keys outside the call-graph section and
 * unrecognized call-graph.* keys return 0 (accepted, ignored).
 */
int perf_callchain_config(const char *var, const char *value)
{
	char *endptr;

	if (!strstarts(var, "call-graph."))
		return 0;
	/* Skip the "call-graph." prefix. */
	var += sizeof("call-graph.") - 1;

	if (!strcmp(var, "record-mode"))
		return parse_callchain_record_opt(value, &callchain_param);
	if (!strcmp(var, "dump-size")) {
		unsigned long size = 0;
		int ret;

		ret = get_stack_size(value, &size);
		callchain_param.dump_size = size;

		return ret;
	}
	if (!strcmp(var, "print-type")){
		int ret;
		ret = parse_callchain_mode(value);
		if (ret == -1)
			pr_err("Invalid callchain mode: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "order")){
		int ret;
		ret = parse_callchain_order(value);
		if (ret == -1)
			pr_err("Invalid callchain order: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "sort-key")){
		int ret;
		ret = parse_callchain_sort_key(value);
		if (ret == -1)
			pr_err("Invalid callchain sort key: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "threshold")) {
		callchain_param.min_percent = strtod(value, &endptr);
		if (value == endptr) {
			pr_err("Invalid callchain threshold: %s\n", value);
			return -1;
		}
	}
	if (!strcmp(var, "print-limit")) {
		/*
		 * NOTE(review): parsed with strtod() although print_limit
		 * is integral - confirm strtoul() wasn't intended.
		 */
		callchain_param.print_limit = strtod(value, &endptr);
		if (value == endptr) {
			pr_err("Invalid callchain print limit: %s\n", value);
			return -1;
		}
	}

	return 0;
}
367 
368 static void
369 rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
370 		    enum chain_mode mode)
371 {
372 	struct rb_node **p = &root->rb_node;
373 	struct rb_node *parent = NULL;
374 	struct callchain_node *rnode;
375 	u64 chain_cumul = callchain_cumul_hits(chain);
376 
377 	while (*p) {
378 		u64 rnode_cumul;
379 
380 		parent = *p;
381 		rnode = rb_entry(parent, struct callchain_node, rb_node);
382 		rnode_cumul = callchain_cumul_hits(rnode);
383 
384 		switch (mode) {
385 		case CHAIN_FLAT:
386 		case CHAIN_FOLDED:
387 			if (rnode->hit < chain->hit)
388 				p = &(*p)->rb_left;
389 			else
390 				p = &(*p)->rb_right;
391 			break;
392 		case CHAIN_GRAPH_ABS: /* Falldown */
393 		case CHAIN_GRAPH_REL:
394 			if (rnode_cumul < chain_cumul)
395 				p = &(*p)->rb_left;
396 			else
397 				p = &(*p)->rb_right;
398 			break;
399 		case CHAIN_NONE:
400 		default:
401 			break;
402 		}
403 	}
404 
405 	rb_link_node(&chain->rb_node, parent, p);
406 	rb_insert_color(&chain->rb_node, root);
407 }
408 
409 static void
410 __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
411 		  u64 min_hit)
412 {
413 	struct rb_node *n;
414 	struct callchain_node *child;
415 
416 	n = rb_first(&node->rb_root_in);
417 	while (n) {
418 		child = rb_entry(n, struct callchain_node, rb_node_in);
419 		n = rb_next(n);
420 
421 		__sort_chain_flat(rb_root, child, min_hit);
422 	}
423 
424 	if (node->hit && node->hit >= min_hit)
425 		rb_insert_callchain(rb_root, node, CHAIN_FLAT);
426 }
427 
/*
 * Once every callchain has been collected from the stream, sort
 * them by hit into a single flat rbtree.
 */
static void
sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
		u64 min_hit, struct callchain_param *param __maybe_unused)
{
	/* Start from an empty output tree; nodes under min_hit are skipped. */
	*rb_root = RB_ROOT;
	__sort_chain_flat(rb_root, &root->node, min_hit);
}
439 
440 static void __sort_chain_graph_abs(struct callchain_node *node,
441 				   u64 min_hit)
442 {
443 	struct rb_node *n;
444 	struct callchain_node *child;
445 
446 	node->rb_root = RB_ROOT;
447 	n = rb_first(&node->rb_root_in);
448 
449 	while (n) {
450 		child = rb_entry(n, struct callchain_node, rb_node_in);
451 		n = rb_next(n);
452 
453 		__sort_chain_graph_abs(child, min_hit);
454 		if (callchain_cumul_hits(child) >= min_hit)
455 			rb_insert_callchain(&node->rb_root, child,
456 					    CHAIN_GRAPH_ABS);
457 	}
458 }
459 
/*
 * CHAIN_GRAPH_ABS entry point: sort the whole tree against the
 * absolute @min_hit threshold and expose the root's sorted children
 * through @rb_root.
 */
static void
sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit, struct callchain_param *param __maybe_unused)
{
	__sort_chain_graph_abs(&chain_root->node, min_hit);
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
467 
468 static void __sort_chain_graph_rel(struct callchain_node *node,
469 				   double min_percent)
470 {
471 	struct rb_node *n;
472 	struct callchain_node *child;
473 	u64 min_hit;
474 
475 	node->rb_root = RB_ROOT;
476 	min_hit = ceil(node->children_hit * min_percent);
477 
478 	n = rb_first(&node->rb_root_in);
479 	while (n) {
480 		child = rb_entry(n, struct callchain_node, rb_node_in);
481 		n = rb_next(n);
482 
483 		__sort_chain_graph_rel(child, min_percent);
484 		if (callchain_cumul_hits(child) >= min_hit)
485 			rb_insert_callchain(&node->rb_root, child,
486 					    CHAIN_GRAPH_REL);
487 	}
488 }
489 
/*
 * CHAIN_GRAPH_REL entry point: @param->min_percent is a percentage,
 * converted here to a 0..1 fraction before the recursive sort; the
 * root's sorted children are exposed through @rb_root.
 */
static void
sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit __maybe_unused, struct callchain_param *param)
{
	__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
497 
498 int callchain_register_param(struct callchain_param *param)
499 {
500 	switch (param->mode) {
501 	case CHAIN_GRAPH_ABS:
502 		param->sort = sort_chain_graph_abs;
503 		break;
504 	case CHAIN_GRAPH_REL:
505 		param->sort = sort_chain_graph_rel;
506 		break;
507 	case CHAIN_FLAT:
508 	case CHAIN_FOLDED:
509 		param->sort = sort_chain_flat;
510 		break;
511 	case CHAIN_NONE:
512 	default:
513 		return -1;
514 	}
515 	return 0;
516 }
517 
/*
 * Create a child for a parent. If inherit_children, then the new child
 * will become the new parent of its parent's children
 */
static struct callchain_node *
create_child(struct callchain_node *parent, bool inherit_children)
{
	struct callchain_node *new;

	new = zalloc(sizeof(*new));
	if (!new) {
		perror("not enough memory to create child for code path tree");
		return NULL;
	}
	new->parent = parent;
	INIT_LIST_HEAD(&new->val);
	INIT_LIST_HEAD(&new->parent_val);

	if (inherit_children) {
		struct rb_node *n;
		struct callchain_node *child;

		/* Steal the whole children tree from the parent... */
		new->rb_root_in = parent->rb_root_in;
		parent->rb_root_in = RB_ROOT;

		/* ...and reparent every stolen child to the new node. */
		n = rb_first(&new->rb_root_in);
		while (n) {
			child = rb_entry(n, struct callchain_node, rb_node_in);
			child->parent = new;
			n = rb_next(n);
		}

		/* make it the first child (the parent's tree is now empty) */
		rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	}

	return new;
}
557 
558 
/*
 * Fill the node with callchain values: consume every remaining cursor
 * entry into the node's value list, copying branch statistics along.
 * Returns 0 on success, -1 on allocation failure (partially filled).
 */
static int
fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
{
	struct callchain_cursor_node *cursor_node;

	node->val_nr = cursor->nr - cursor->pos;
	if (!node->val_nr)
		pr_warning("Warning: empty node in callchain tree\n");

	cursor_node = callchain_cursor_current(cursor);

	while (cursor_node) {
		struct callchain_list *call;

		call = zalloc(sizeof(*call));
		if (!call) {
			perror("not enough memory for the code path tree");
			return -1;
		}
		call->ip = cursor_node->ip;
		call->ms.sym = cursor_node->sym;
		/* Take a map reference; dropped when the list entry dies. */
		call->ms.map = map__get(cursor_node->map);
		call->srcline = cursor_node->srcline;

		if (cursor_node->branch) {
			call->branch_count = 1;

			if (cursor_node->branch_from) {
				/*
				 * branch_from is set with value somewhere else
				 * to imply it's "to" of a branch.
				 */
				call->brtype_stat.branch_to = true;

				if (cursor_node->branch_flags.predicted)
					call->predicted_count = 1;

				if (cursor_node->branch_flags.abort)
					call->abort_count = 1;

				branch_type_count(&call->brtype_stat,
						  &cursor_node->branch_flags,
						  cursor_node->branch_from,
						  cursor_node->ip);
			} else {
				/*
				 * It's "from" of a branch
				 */
				call->brtype_stat.branch_to = false;
				call->cycles_count =
					cursor_node->branch_flags.cycles;
				call->iter_count = cursor_node->nr_loop_iter;
				call->iter_cycles = cursor_node->iter_cycles;
			}
		}

		list_add_tail(&call->list, &node->val);

		callchain_cursor_advance(cursor);
		cursor_node = callchain_cursor_current(cursor);
	}
	return 0;
}
625 
/*
 * Allocate a new child of @parent and fill it with the remaining
 * cursor entries, crediting it with @period hits.  On fill failure the
 * partially built value list is torn down (dropping its map
 * references) and NULL is returned.
 */
static struct callchain_node *
add_child(struct callchain_node *parent,
	  struct callchain_cursor *cursor,
	  u64 period)
{
	struct callchain_node *new;

	new = create_child(parent, false);
	if (new == NULL)
		return NULL;

	if (fill_node(new, cursor) < 0) {
		struct callchain_list *call, *tmp;

		list_for_each_entry_safe(call, tmp, &new->val, list) {
			list_del_init(&call->list);
			map__zput(call->ms.map);
			free(call);
		}
		free(new);
		return NULL;
	}

	new->children_hit = 0;
	new->hit = period;
	new->children_count = 0;
	new->count = 1;
	return new;
}
655 
/* Three-way comparison result, or MATCH_ERROR when incomparable. */
enum match_result {
	MATCH_ERROR  = -1,
	MATCH_EQ,
	MATCH_LT,
	MATCH_GT,
};

/*
 * Three-way compare two possibly-NULL strings.  A NULL left side
 * compares greater than a non-NULL right side (and vice versa); two
 * NULLs cannot be ordered and yield MATCH_ERROR.
 */
static enum match_result match_chain_strings(const char *left,
					     const char *right)
{
	int cmp;

	if (!left && !right)
		return MATCH_ERROR;
	if (!left)
		return MATCH_GT;
	if (!right)
		return MATCH_LT;

	cmp = strcmp(left, right);
	if (cmp < 0)
		return MATCH_LT;
	if (cmp > 0)
		return MATCH_GT;
	return MATCH_EQ;
}
683 
684 /*
685  * We need to always use relative addresses because we're aggregating
686  * callchains from multiple threads, i.e. different address spaces, so
687  * comparing absolute addresses make no sense as a symbol in a DSO may end up
688  * in a different address when used in a different binary or even the same
689  * binary but with some sort of address randomization technique, thus we need
690  * to compare just relative addresses. -acme
691  */
692 static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
693 						   struct map *right_map, u64 right_ip)
694 {
695 	struct dso *left_dso = left_map ? left_map->dso : NULL;
696 	struct dso *right_dso = right_map ? right_map->dso : NULL;
697 
698 	if (left_dso != right_dso)
699 		return left_dso < right_dso ? MATCH_LT : MATCH_GT;
700 
701 	if (left_ip != right_ip)
702  		return left_ip < right_ip ? MATCH_LT : MATCH_GT;
703 
704 	return MATCH_EQ;
705 }
706 
/*
 * Compare a cursor entry against a stored callchain entry according to
 * callchain_param.key (srcline, function symbol, or raw address),
 * falling back down the switch when the preferred key is unavailable.
 * On an exact match, fold the entry's branch statistics into @cnode.
 */
static enum match_result match_chain(struct callchain_cursor_node *node,
				     struct callchain_list *cnode)
{
	enum match_result match = MATCH_ERROR;

	switch (callchain_param.key) {
	case CCKEY_SRCLINE:
		match = match_chain_strings(cnode->srcline, node->srcline);
		if (match != MATCH_ERROR)
			break;
		/* otherwise fall-back to symbol-based comparison below */
		__fallthrough;
	case CCKEY_FUNCTION:
		if (node->sym && cnode->ms.sym) {
			/*
			 * Compare inlined frames based on their symbol name
			 * because different inlined frames will have the same
			 * symbol start. Otherwise do a faster comparison based
			 * on the symbol start address.
			 */
			if (cnode->ms.sym->inlined || node->sym->inlined) {
				match = match_chain_strings(cnode->ms.sym->name,
							    node->sym->name);
				if (match != MATCH_ERROR)
					break;
			} else {
				match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
								  node->map, node->sym->start);
				break;
			}
		}
		/* otherwise fall-back to IP-based comparison below */
		__fallthrough;
	case CCKEY_ADDRESS:
	default:
		match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
		break;
	}

	/* Accumulate branch statistics only on an exact match. */
	if (match == MATCH_EQ && node->branch) {
		cnode->branch_count++;

		if (node->branch_from) {
			/*
			 * It's "to" of a branch
			 */
			cnode->brtype_stat.branch_to = true;

			if (node->branch_flags.predicted)
				cnode->predicted_count++;

			if (node->branch_flags.abort)
				cnode->abort_count++;

			branch_type_count(&cnode->brtype_stat,
					  &node->branch_flags,
					  node->branch_from,
					  node->ip);
		} else {
			/*
			 * It's "from" of a branch
			 */
			cnode->brtype_stat.branch_to = false;
			cnode->cycles_count += node->branch_flags.cycles;
			cnode->iter_count += node->nr_loop_iter;
			cnode->iter_cycles += node->iter_cycles;
			cnode->from_count++;
		}
	}

	return match;
}
779 
/*
 * Split the parent in two parts (a new child is created) and
 * give a part of its callchain to the created child.
 * Then create another child to host the given callchain of new branch.
 * @idx_local is the number of leading entries that matched; the rest
 * of the parent's value list moves into the new child.
 */
static int
split_add_child(struct callchain_node *parent,
		struct callchain_cursor *cursor,
		struct callchain_list *to_split,
		u64 idx_parents, u64 idx_local, u64 period)
{
	struct callchain_node *new;
	struct list_head *old_tail;
	unsigned int idx_total = idx_parents + idx_local;

	/* split */
	new = create_child(parent, true);
	if (new == NULL)
		return -1;

	/* split the callchain and move a part to the new child */
	old_tail = parent->val.prev;
	list_del_range(&to_split->list, old_tail);
	new->val.next = &to_split->list;
	new->val.prev = old_tail;
	to_split->list.prev = &new->val;
	old_tail->next = &new->val;

	/* split the hits */
	new->hit = parent->hit;
	new->children_hit = parent->children_hit;
	parent->children_hit = callchain_cumul_hits(new);
	new->val_nr = parent->val_nr - idx_local;
	parent->val_nr = idx_local;
	new->count = parent->count;
	new->children_count = parent->children_count;
	parent->children_count = callchain_cumul_counts(new);

	/* create a new child for the new branch if any */
	if (idx_total < cursor->nr) {
		struct callchain_node *first;
		struct callchain_list *cnode;
		struct callchain_cursor_node *node;
		struct rb_node *p, **pp;

		/* parent is now purely internal; hits move to the children */
		parent->hit = 0;
		parent->children_hit += period;
		parent->count = 0;
		parent->children_count += 1;

		node = callchain_cursor_current(cursor);
		new = add_child(parent, cursor, period);
		if (new == NULL)
			return -1;

		/*
		 * This is second child since we moved parent's children
		 * to new (first) child above.
		 */
		p = parent->rb_root_in.rb_node;
		first = rb_entry(p, struct callchain_node, rb_node_in);
		cnode = list_first_entry(&first->val, struct callchain_list,
					 list);

		if (match_chain(node, cnode) == MATCH_LT)
			pp = &p->rb_left;
		else
			pp = &p->rb_right;

		rb_link_node(&new->rb_node_in, p, pp);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	} else {
		/* the whole cursor was consumed: parent keeps the hit */
		parent->hit = period;
		parent->count = 1;
	}
	return 0;
}
857 
858 static enum match_result
859 append_chain(struct callchain_node *root,
860 	     struct callchain_cursor *cursor,
861 	     u64 period);
862 
/*
 * Append the remaining cursor entries below @root: merge them into a
 * matching existing child, or add a new child when none matches.
 * @root's cumulated hits/counts are bumped by @period on success.
 * Returns 0 on success, -1 on error.
 */
static int
append_chain_children(struct callchain_node *root,
		      struct callchain_cursor *cursor,
		      u64 period)
{
	struct callchain_node *rnode;
	struct callchain_cursor_node *node;
	struct rb_node **p = &root->rb_root_in.rb_node;
	struct rb_node *parent = NULL;

	node = callchain_cursor_current(cursor);
	if (!node)
		return -1;

	/* lookup in children */
	while (*p) {
		enum match_result ret;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node_in);

		/* If at least first entry matches, rely to children */
		ret = append_chain(rnode, cursor, period);
		if (ret == MATCH_EQ)
			goto inc_children_hit;
		if (ret == MATCH_ERROR)
			return -1;

		if (ret == MATCH_LT)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	/* nothing in children, add to the current node */
	rnode = add_child(root, cursor, period);
	if (rnode == NULL)
		return -1;

	rb_link_node(&rnode->rb_node_in, parent, p);
	rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);

inc_children_hit:
	root->children_hit += period;
	root->children_count++;
	return 0;
}
909 
/*
 * Try to merge the cursor entries into @root: compare against @root's
 * own value list first, splitting the node when only a prefix matches,
 * then recurse into the children for any remainder.
 */
static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period)
{
	struct callchain_list *cnode;
	u64 start = cursor->pos;
	bool found = false;
	u64 matches;
	enum match_result cmp = MATCH_ERROR;

	/*
	 * Lookup in the current node
	 * If we have a symbol, then compare the start to match
	 * anywhere inside a function, unless function
	 * mode is disabled.
	 */
	list_for_each_entry(cnode, &root->val, list) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (!node)
			break;

		cmp = match_chain(node, cnode);
		if (cmp != MATCH_EQ)
			break;

		found = true;

		callchain_cursor_advance(cursor);
	}

	/* no entry matched at all: let the parent decide where to insert */
	if (!found) {
		WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
		return cmp;
	}

	matches = cursor->pos - start;

	/* we match only a part of the node. Split it and add the new chain */
	if (matches < root->val_nr) {
		if (split_add_child(root, cursor, cnode, start, matches,
				    period) < 0)
			return MATCH_ERROR;

		return MATCH_EQ;
	}

	/* we match 100% of the path, increment the hit */
	if (matches == root->val_nr && cursor->pos == cursor->nr) {
		root->hit += period;
		root->count++;
		return MATCH_EQ;
	}

	/* We match the node and still have a part remaining */
	if (append_chain_children(root, cursor, period) < 0)
		return MATCH_ERROR;

	return MATCH_EQ;
}
973 
974 int callchain_append(struct callchain_root *root,
975 		     struct callchain_cursor *cursor,
976 		     u64 period)
977 {
978 	if (!cursor->nr)
979 		return 0;
980 
981 	callchain_cursor_commit(cursor);
982 
983 	if (append_chain_children(&root->node, cursor, period) < 0)
984 		return -1;
985 
986 	if (cursor->nr > root->max_depth)
987 		root->max_depth = cursor->nr;
988 
989 	return 0;
990 }
991 
/*
 * Replay @src's value list through the cursor and append it (then,
 * recursively, each of its children) into @dst, freeing @src's entries
 * and child nodes along the way.  The cursor's position and last
 * pointer are restored before returning.
 */
static int
merge_chain_branch(struct callchain_cursor *cursor,
		   struct callchain_node *dst, struct callchain_node *src)
{
	struct callchain_cursor_node **old_last = cursor->last;
	struct callchain_node *child;
	struct callchain_list *list, *next_list;
	struct rb_node *n;
	int old_pos = cursor->nr;
	int err = 0;

	/* Push src's own entries onto the cursor, consuming the list. */
	list_for_each_entry_safe(list, next_list, &src->val, list) {
		callchain_cursor_append(cursor, list->ip,
					list->ms.map, list->ms.sym,
					false, NULL, 0, 0, 0, list->srcline);
		list_del_init(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	if (src->hit) {
		callchain_cursor_commit(cursor);
		if (append_chain_children(dst, cursor, src->hit) < 0)
			return -1;
	}

	n = rb_first(&src->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);
		n = rb_next(n);
		rb_erase(&child->rb_node_in, &src->rb_root_in);

		err = merge_chain_branch(cursor, dst, child);
		if (err)
			break;

		free(child);
	}

	/* Restore the cursor to where the caller left it. */
	cursor->nr = old_pos;
	cursor->last = old_last;

	return err;
}
1036 
/* Merge the whole @src tree into @dst, using @cursor as scratch space. */
int callchain_merge(struct callchain_cursor *cursor,
		    struct callchain_root *dst, struct callchain_root *src)
{
	return merge_chain_branch(cursor, &dst->node, &src->node);
}
1042 
1043 int callchain_cursor_append(struct callchain_cursor *cursor,
1044 			    u64 ip, struct map *map, struct symbol *sym,
1045 			    bool branch, struct branch_flags *flags,
1046 			    int nr_loop_iter, u64 iter_cycles, u64 branch_from,
1047 			    const char *srcline)
1048 {
1049 	struct callchain_cursor_node *node = *cursor->last;
1050 
1051 	if (!node) {
1052 		node = calloc(1, sizeof(*node));
1053 		if (!node)
1054 			return -ENOMEM;
1055 
1056 		*cursor->last = node;
1057 	}
1058 
1059 	node->ip = ip;
1060 	map__zput(node->map);
1061 	node->map = map__get(map);
1062 	node->sym = sym;
1063 	node->branch = branch;
1064 	node->nr_loop_iter = nr_loop_iter;
1065 	node->iter_cycles = iter_cycles;
1066 	node->srcline = srcline;
1067 
1068 	if (flags)
1069 		memcpy(&node->branch_flags, flags,
1070 			sizeof(struct branch_flags));
1071 
1072 	node->branch_from = branch_from;
1073 	cursor->nr++;
1074 
1075 	cursor->last = &node->next;
1076 
1077 	return 0;
1078 }
1079 
/*
 * Resolve @sample's callchain into @cursor when callchains (or branch
 * flag counting) are in use.  Returns 0 when there is nothing to
 * resolve, otherwise thread__resolve_callchain()'s result.
 */
int sample__resolve_callchain(struct perf_sample *sample,
			      struct callchain_cursor *cursor, struct symbol **parent,
			      struct evsel *evsel, struct addr_location *al,
			      int max_stack)
{
	if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
		return 0;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
	    perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
		return thread__resolve_callchain(al->thread, cursor, evsel, sample,
						 parent, al, max_stack);
	}
	return 0;
}
1095 
/*
 * Append the thread-local cursor's chain to @he, weighted by the
 * sample period.  No-op unless callchains (or branch flag counting)
 * are enabled and the sample actually carries a callchain.
 */
int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
{
	if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
		!symbol_conf.show_branchflag_count)
		return 0;
	return callchain_append(he->callchain, &callchain_cursor, sample->period);
}
1103 
/*
 * Translate a cursor node into an addr_location, deriving the cpumode
 * and level character from whether the map belongs to the (host or
 * guest) kernel or to userspace.  Returns 0 when the entry should be
 * skipped (unresolved symbol and @hide_unresolved), 1 otherwise.
 */
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
			bool hide_unresolved)
{
	al->map = node->map;
	al->sym = node->sym;
	al->srcline = node->srcline;
	al->addr = node->ip;

	if (al->sym == NULL) {
		if (hide_unresolved)
			return 0;
		/* No map either: cpumode/level cannot be derived. */
		if (al->map == NULL)
			goto out;
	}

	if (al->map->groups == &al->machine->kmaps) {
		/* Kernel map: host or guest kernel sample. */
		if (machine__is_host(al->machine)) {
			al->cpumode = PERF_RECORD_MISC_KERNEL;
			al->level = 'k';
		} else {
			al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
			al->level = 'g';
		}
	} else {
		/* User map: host user, guest user, or hypervisor. */
		if (machine__is_host(al->machine)) {
			al->cpumode = PERF_RECORD_MISC_USER;
			al->level = '.';
		} else if (perf_guest) {
			al->cpumode = PERF_RECORD_MISC_GUEST_USER;
			al->level = 'u';
		} else {
			al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
			al->level = 'H';
		}
	}

out:
	return 1;
}
1143 
1144 char *callchain_list__sym_name(struct callchain_list *cl,
1145 			       char *bf, size_t bfsize, bool show_dso)
1146 {
1147 	bool show_addr = callchain_param.key == CCKEY_ADDRESS;
1148 	bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
1149 	int printed;
1150 
1151 	if (cl->ms.sym) {
1152 		const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
1153 
1154 		if (show_srcline && cl->srcline)
1155 			printed = scnprintf(bf, bfsize, "%s %s%s",
1156 					    cl->ms.sym->name, cl->srcline,
1157 					    inlined);
1158 		else
1159 			printed = scnprintf(bf, bfsize, "%s%s",
1160 					    cl->ms.sym->name, inlined);
1161 	} else
1162 		printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
1163 
1164 	if (show_dso)
1165 		scnprintf(bf + printed, bfsize - printed, " %s",
1166 			  cl->ms.map ?
1167 			  cl->ms.map->dso->short_name :
1168 			  "unknown");
1169 
1170 	return bf;
1171 }
1172 
1173 char *callchain_node__scnprintf_value(struct callchain_node *node,
1174 				      char *bf, size_t bfsize, u64 total)
1175 {
1176 	double percent = 0.0;
1177 	u64 period = callchain_cumul_hits(node);
1178 	unsigned count = callchain_cumul_counts(node);
1179 
1180 	if (callchain_param.mode == CHAIN_FOLDED) {
1181 		period = node->hit;
1182 		count = node->count;
1183 	}
1184 
1185 	switch (callchain_param.value) {
1186 	case CCVAL_PERIOD:
1187 		scnprintf(bf, bfsize, "%"PRIu64, period);
1188 		break;
1189 	case CCVAL_COUNT:
1190 		scnprintf(bf, bfsize, "%u", count);
1191 		break;
1192 	case CCVAL_PERCENT:
1193 	default:
1194 		if (total)
1195 			percent = period * 100.0 / total;
1196 		scnprintf(bf, bfsize, "%.2f%%", percent);
1197 		break;
1198 	}
1199 	return bf;
1200 }
1201 
1202 int callchain_node__fprintf_value(struct callchain_node *node,
1203 				 FILE *fp, u64 total)
1204 {
1205 	double percent = 0.0;
1206 	u64 period = callchain_cumul_hits(node);
1207 	unsigned count = callchain_cumul_counts(node);
1208 
1209 	if (callchain_param.mode == CHAIN_FOLDED) {
1210 		period = node->hit;
1211 		count = node->count;
1212 	}
1213 
1214 	switch (callchain_param.value) {
1215 	case CCVAL_PERIOD:
1216 		return fprintf(fp, "%"PRIu64, period);
1217 	case CCVAL_COUNT:
1218 		return fprintf(fp, "%u", count);
1219 	case CCVAL_PERCENT:
1220 	default:
1221 		if (total)
1222 			percent = period * 100.0 / total;
1223 		return percent_color_fprintf(fp, "%.2f%%", percent);
1224 	}
1225 	return 0;
1226 }
1227 
1228 static void callchain_counts_value(struct callchain_node *node,
1229 				   u64 *branch_count, u64 *predicted_count,
1230 				   u64 *abort_count, u64 *cycles_count)
1231 {
1232 	struct callchain_list *clist;
1233 
1234 	list_for_each_entry(clist, &node->val, list) {
1235 		if (branch_count)
1236 			*branch_count += clist->branch_count;
1237 
1238 		if (predicted_count)
1239 			*predicted_count += clist->predicted_count;
1240 
1241 		if (abort_count)
1242 			*abort_count += clist->abort_count;
1243 
1244 		if (cycles_count)
1245 			*cycles_count += clist->cycles_count;
1246 	}
1247 }
1248 
1249 static int callchain_node_branch_counts_cumul(struct callchain_node *node,
1250 					      u64 *branch_count,
1251 					      u64 *predicted_count,
1252 					      u64 *abort_count,
1253 					      u64 *cycles_count)
1254 {
1255 	struct callchain_node *child;
1256 	struct rb_node *n;
1257 
1258 	n = rb_first(&node->rb_root_in);
1259 	while (n) {
1260 		child = rb_entry(n, struct callchain_node, rb_node_in);
1261 		n = rb_next(n);
1262 
1263 		callchain_node_branch_counts_cumul(child, branch_count,
1264 						   predicted_count,
1265 						   abort_count,
1266 						   cycles_count);
1267 
1268 		callchain_counts_value(child, branch_count,
1269 				       predicted_count, abort_count,
1270 				       cycles_count);
1271 	}
1272 
1273 	return 0;
1274 }
1275 
1276 int callchain_branch_counts(struct callchain_root *root,
1277 			    u64 *branch_count, u64 *predicted_count,
1278 			    u64 *abort_count, u64 *cycles_count)
1279 {
1280 	if (branch_count)
1281 		*branch_count = 0;
1282 
1283 	if (predicted_count)
1284 		*predicted_count = 0;
1285 
1286 	if (abort_count)
1287 		*abort_count = 0;
1288 
1289 	if (cycles_count)
1290 		*cycles_count = 0;
1291 
1292 	return callchain_node_branch_counts_cumul(&root->node,
1293 						  branch_count,
1294 						  predicted_count,
1295 						  abort_count,
1296 						  cycles_count);
1297 }
1298 
1299 static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
1300 {
1301 	int printed;
1302 
1303 	printed = scnprintf(bf, bfsize, "%s%s:%" PRId64 "", (idx) ? " " : " (", str, value);
1304 
1305 	return printed;
1306 }
1307 
/*
 * Print one " name:value%" element of a branch-counts annotation into
 * bf, suppressing values below a non-zero threshold.  Element 0 opens
 * the list with " (".  Returns the number of characters written
 * (0 when suppressed).
 */
static int count_float_printf(int idx, const char *str, float value,
			      char *bf, int bfsize, float threshold)
{
	if (threshold != 0.0 && value < threshold)
		return 0;

	return scnprintf(bf, bfsize, "%s%s:%.1f%%", idx ? " " : " (", str, value);
}
1320 
1321 static int branch_to_str(char *bf, int bfsize,
1322 			 u64 branch_count, u64 predicted_count,
1323 			 u64 abort_count,
1324 			 struct branch_type_stat *brtype_stat)
1325 {
1326 	int printed, i = 0;
1327 
1328 	printed = branch_type_str(brtype_stat, bf, bfsize);
1329 	if (printed)
1330 		i++;
1331 
1332 	if (predicted_count < branch_count) {
1333 		printed += count_float_printf(i++, "predicted",
1334 				predicted_count * 100.0 / branch_count,
1335 				bf + printed, bfsize - printed, 0.0);
1336 	}
1337 
1338 	if (abort_count) {
1339 		printed += count_float_printf(i++, "abort",
1340 				abort_count * 100.0 / branch_count,
1341 				bf + printed, bfsize - printed, 0.1);
1342 	}
1343 
1344 	if (i)
1345 		printed += scnprintf(bf + printed, bfsize - printed, ")");
1346 
1347 	return printed;
1348 }
1349 
1350 static int branch_from_str(char *bf, int bfsize,
1351 			   u64 branch_count,
1352 			   u64 cycles_count, u64 iter_count,
1353 			   u64 iter_cycles, u64 from_count)
1354 {
1355 	int printed = 0, i = 0;
1356 	u64 cycles, v = 0;
1357 
1358 	cycles = cycles_count / branch_count;
1359 	if (cycles) {
1360 		printed += count_pri64_printf(i++, "cycles",
1361 				cycles,
1362 				bf + printed, bfsize - printed);
1363 	}
1364 
1365 	if (iter_count && from_count) {
1366 		v = iter_count / from_count;
1367 		if (v) {
1368 			printed += count_pri64_printf(i++, "iter",
1369 					v, bf + printed, bfsize - printed);
1370 
1371 			printed += count_pri64_printf(i++, "avg_cycles",
1372 					iter_cycles / iter_count,
1373 					bf + printed, bfsize - printed);
1374 		}
1375 	}
1376 
1377 	if (i)
1378 		printed += scnprintf(bf + printed, bfsize - printed, ")");
1379 
1380 	return printed;
1381 }
1382 
1383 static int counts_str_build(char *bf, int bfsize,
1384 			     u64 branch_count, u64 predicted_count,
1385 			     u64 abort_count, u64 cycles_count,
1386 			     u64 iter_count, u64 iter_cycles,
1387 			     u64 from_count,
1388 			     struct branch_type_stat *brtype_stat)
1389 {
1390 	int printed;
1391 
1392 	if (branch_count == 0)
1393 		return scnprintf(bf, bfsize, " (calltrace)");
1394 
1395 	if (brtype_stat->branch_to) {
1396 		printed = branch_to_str(bf, bfsize, branch_count,
1397 				predicted_count, abort_count, brtype_stat);
1398 	} else {
1399 		printed = branch_from_str(bf, bfsize, branch_count,
1400 				cycles_count, iter_count, iter_cycles,
1401 				from_count);
1402 	}
1403 
1404 	if (!printed)
1405 		bf[0] = 0;
1406 
1407 	return printed;
1408 }
1409 
1410 static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1411 				   u64 branch_count, u64 predicted_count,
1412 				   u64 abort_count, u64 cycles_count,
1413 				   u64 iter_count, u64 iter_cycles,
1414 				   u64 from_count,
1415 				   struct branch_type_stat *brtype_stat)
1416 {
1417 	char str[256];
1418 
1419 	counts_str_build(str, sizeof(str), branch_count,
1420 			 predicted_count, abort_count, cycles_count,
1421 			 iter_count, iter_cycles, from_count, brtype_stat);
1422 
1423 	if (fp)
1424 		return fprintf(fp, "%s", str);
1425 
1426 	return scnprintf(bf, bfsize, "%s", str);
1427 }
1428 
1429 int callchain_list_counts__printf_value(struct callchain_list *clist,
1430 					FILE *fp, char *bf, int bfsize)
1431 {
1432 	u64 branch_count, predicted_count;
1433 	u64 abort_count, cycles_count;
1434 	u64 iter_count, iter_cycles;
1435 	u64 from_count;
1436 
1437 	branch_count = clist->branch_count;
1438 	predicted_count = clist->predicted_count;
1439 	abort_count = clist->abort_count;
1440 	cycles_count = clist->cycles_count;
1441 	iter_count = clist->iter_count;
1442 	iter_cycles = clist->iter_cycles;
1443 	from_count = clist->from_count;
1444 
1445 	return callchain_counts_printf(fp, bf, bfsize, branch_count,
1446 				       predicted_count, abort_count,
1447 				       cycles_count, iter_count, iter_cycles,
1448 				       from_count, &clist->brtype_stat);
1449 }
1450 
1451 static void free_callchain_node(struct callchain_node *node)
1452 {
1453 	struct callchain_list *list, *tmp;
1454 	struct callchain_node *child;
1455 	struct rb_node *n;
1456 
1457 	list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
1458 		list_del_init(&list->list);
1459 		map__zput(list->ms.map);
1460 		free(list);
1461 	}
1462 
1463 	list_for_each_entry_safe(list, tmp, &node->val, list) {
1464 		list_del_init(&list->list);
1465 		map__zput(list->ms.map);
1466 		free(list);
1467 	}
1468 
1469 	n = rb_first(&node->rb_root_in);
1470 	while (n) {
1471 		child = container_of(n, struct callchain_node, rb_node_in);
1472 		n = rb_next(n);
1473 		rb_erase(&child->rb_node_in, &node->rb_root_in);
1474 
1475 		free_callchain_node(child);
1476 		free(child);
1477 	}
1478 }
1479 
1480 void free_callchain(struct callchain_root *root)
1481 {
1482 	if (!symbol_conf.use_callchain)
1483 		return;
1484 
1485 	free_callchain_node(&root->node);
1486 }
1487 
1488 static u64 decay_callchain_node(struct callchain_node *node)
1489 {
1490 	struct callchain_node *child;
1491 	struct rb_node *n;
1492 	u64 child_hits = 0;
1493 
1494 	n = rb_first(&node->rb_root_in);
1495 	while (n) {
1496 		child = container_of(n, struct callchain_node, rb_node_in);
1497 
1498 		child_hits += decay_callchain_node(child);
1499 		n = rb_next(n);
1500 	}
1501 
1502 	node->hit = (node->hit * 7) / 8;
1503 	node->children_hit = child_hits;
1504 
1505 	return node->hit;
1506 }
1507 
1508 void decay_callchain(struct callchain_root *root)
1509 {
1510 	if (!symbol_conf.use_callchain)
1511 		return;
1512 
1513 	decay_callchain_node(&root->node);
1514 }
1515 
/*
 * Build node->parent_val: copies of all callchain_list entries found on
 * this node's ancestors, so the node can be shown with its full call
 * path.  Returns 0 on success, -ENOMEM on allocation failure (nothing
 * is published to the node in that case).
 */
int callchain_node__make_parent_list(struct callchain_node *node)
{
	struct callchain_node *parent = node->parent;
	struct callchain_list *chain, *new;
	LIST_HEAD(head);

	/*
	 * Walk up the ancestry, copying each parent's entries in reverse
	 * onto a temporary list; the reversed splice below undoes this,
	 * leaving the oldest ancestor's entries first with each parent's
	 * internal order preserved.
	 */
	while (parent) {
		list_for_each_entry_reverse(chain, &parent->val, list) {
			new = malloc(sizeof(*new));
			if (new == NULL)
				goto out;
			*new = *chain;
			new->has_children = false;
			map__get(new->ms.map);	/* the copy holds its own map ref */
			list_add_tail(&new->list, &head);
		}
		parent = parent->parent;
	}

	/* Transfer the temporary list into the node, reversing its order. */
	list_for_each_entry_safe_reverse(chain, new, &head, list)
		list_move_tail(&chain->list, &node->parent_val);

	if (!list_empty(&node->parent_val)) {
		/* The first parent entry folds/unfolds iff the node has siblings. */
		chain = list_first_entry(&node->parent_val, struct callchain_list, list);
		chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);

		chain = list_first_entry(&node->val, struct callchain_list, list);
		chain->has_children = false;
	}
	return 0;

out:
	/* Allocation failed: drop every entry copied so far. */
	list_for_each_entry_safe(chain, new, &head, list) {
		list_del_init(&chain->list);
		map__zput(chain->ms.map);
		free(chain);
	}
	return -ENOMEM;
}
1555 
1556 int callchain_cursor__copy(struct callchain_cursor *dst,
1557 			   struct callchain_cursor *src)
1558 {
1559 	int rc = 0;
1560 
1561 	callchain_cursor_reset(dst);
1562 	callchain_cursor_commit(src);
1563 
1564 	while (true) {
1565 		struct callchain_cursor_node *node;
1566 
1567 		node = callchain_cursor_current(src);
1568 		if (node == NULL)
1569 			break;
1570 
1571 		rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
1572 					     node->branch, &node->branch_flags,
1573 					     node->nr_loop_iter,
1574 					     node->iter_cycles,
1575 					     node->branch_from, node->srcline);
1576 		if (rc)
1577 			break;
1578 
1579 		callchain_cursor_advance(src);
1580 	}
1581 
1582 	return rc;
1583 }
1584 
1585 /*
1586  * Initialize a cursor before adding entries inside, but keep
1587  * the previously allocated entries as a cache.
1588  */
1589 void callchain_cursor_reset(struct callchain_cursor *cursor)
1590 {
1591 	struct callchain_cursor_node *node;
1592 
1593 	cursor->nr = 0;
1594 	cursor->last = &cursor->first;
1595 
1596 	for (node = cursor->first; node != NULL; node = node->next)
1597 		map__zput(node->map);
1598 }
1599