xref: /linux/tools/perf/ui/stdio/hist.c (revision 92ce4c3ea7c44e61ca2b6ef3e5682bfcea851d87)
1 #include <stdio.h>
2 #include <linux/string.h>
3 
4 #include "../../util/util.h"
5 #include "../../util/hist.h"
6 #include "../../util/sort.h"
7 #include "../../util/evsel.h"
8 #include "../../util/srcline.h"
9 #include "../../util/string2.h"
10 #include "../../util/thread.h"
11 #include "../../util/sane_ctype.h"
12 
/*
 * Print the standard 12-space lead-in plus @left_margin extra spaces.
 * Returns the number of characters written.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	size_t printed = fprintf(fp, "            ");
	int col;

	for (col = left_margin; col > 0; col--)
		printed += fprintf(fp, " ");

	return printed;
}
23 
24 static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
25 			      int depth, int depth_mask, FILE *fp)
26 {
27 	struct dso *dso;
28 	struct inline_node *node;
29 	struct inline_list *ilist;
30 	int ret = 0, i;
31 
32 	if (map == NULL)
33 		return 0;
34 
35 	dso = map->dso;
36 	if (dso == NULL)
37 		return 0;
38 
39 	node = dso__parse_addr_inlines(dso,
40 				       map__rip_2objdump(map, ip));
41 	if (node == NULL)
42 		return 0;
43 
44 	list_for_each_entry(ilist, &node->val, list) {
45 		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
46 			ret += callchain__fprintf_left_margin(fp, left_margin);
47 
48 			for (i = 0; i < depth; i++) {
49 				if (depth_mask & (1 << i))
50 					ret += fprintf(fp, "|");
51 				else
52 					ret += fprintf(fp, " ");
53 				ret += fprintf(fp, "          ");
54 			}
55 
56 			if (callchain_param.key == CCKEY_ADDRESS ||
57 			    callchain_param.key == CCKEY_SRCLINE) {
58 				if (ilist->filename != NULL)
59 					ret += fprintf(fp, "%s:%d (inline)",
60 						       ilist->filename,
61 						       ilist->line_nr);
62 				else
63 					ret += fprintf(fp, "??");
64 			} else if (ilist->funcname != NULL)
65 				ret += fprintf(fp, "%s (inline)",
66 					       ilist->funcname);
67 			else if (ilist->filename != NULL)
68 				ret += fprintf(fp, "%s:%d (inline)",
69 					       ilist->filename,
70 					       ilist->line_nr);
71 			else
72 				ret += fprintf(fp, "??");
73 
74 			ret += fprintf(fp, "\n");
75 		}
76 	}
77 
78 	inline_node__delete(node);
79 	return ret;
80 }
81 
/*
 * Emit one separator line of the call graph: the left margin followed
 * by a '|' (or blank) column for every level flagged in @depth_mask.
 * Returns the number of characters written.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	size_t printed = callchain__fprintf_left_margin(fp, left_margin);
	int level;

	for (level = 0; level < depth; level++) {
		const char *col = (depth_mask & (1 << level)) ?
				  "|          " : "           ";

		printed += fprintf(fp, "%s", col);
	}

	return printed + fprintf(fp, "\n");
}
98 
99 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
100 				     struct callchain_list *chain,
101 				     int depth, int depth_mask, int period,
102 				     u64 total_samples, int left_margin)
103 {
104 	int i;
105 	size_t ret = 0;
106 	char bf[1024], *alloc_str = NULL;
107 	char buf[64];
108 	const char *str;
109 
110 	ret += callchain__fprintf_left_margin(fp, left_margin);
111 	for (i = 0; i < depth; i++) {
112 		if (depth_mask & (1 << i))
113 			ret += fprintf(fp, "|");
114 		else
115 			ret += fprintf(fp, " ");
116 		if (!period && i == depth - 1) {
117 			ret += fprintf(fp, "--");
118 			ret += callchain_node__fprintf_value(node, fp, total_samples);
119 			ret += fprintf(fp, "--");
120 		} else
121 			ret += fprintf(fp, "%s", "          ");
122 	}
123 
124 	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
125 
126 	if (symbol_conf.show_branchflag_count) {
127 		if (!period)
128 			callchain_list_counts__printf_value(node, chain, NULL,
129 							    buf, sizeof(buf));
130 		else
131 			callchain_list_counts__printf_value(NULL, chain, NULL,
132 							    buf, sizeof(buf));
133 
134 		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
135 			str = "Not enough memory!";
136 		else
137 			str = alloc_str;
138 	}
139 
140 	fputs(str, fp);
141 	fputc('\n', fp);
142 	free(alloc_str);
143 
144 	if (symbol_conf.inline_name)
145 		ret += inline__fprintf(chain->ms.map, chain->ip,
146 				       left_margin, depth, depth_mask, fp);
147 	return ret;
148 }
149 
150 static struct symbol *rem_sq_bracket;
151 static struct callchain_list rem_hits;
152 
153 static void init_rem_hits(void)
154 {
155 	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
156 	if (!rem_sq_bracket) {
157 		fprintf(stderr, "Not enough memory to display remaining hits\n");
158 		return;
159 	}
160 
161 	strcpy(rem_sq_bracket->name, "[...]");
162 	rem_hits.ms.sym = rem_sq_bracket;
163 }
164 
/*
 * Recursively print one level of a callchain tree in graph mode.
 *
 * @root holds the children at this @depth, @total_samples is the
 * divisor for percentages, and @depth_mask tracks which ancestor
 * levels still need a '|' column.  In CHAIN_GRAPH_REL mode a final
 * "[...]" pseudo entry absorbs the hits filtered out at this level.
 * Returns the number of characters written.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		/* Only the first entry (i == 0) carries the value. */
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* Relative mode re-bases percentages on this child's hits. */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		/* Recurse with this level's pipe added to the mask. */
		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		/* init_rem_hits() may have failed; nothing to display then. */
		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
253 
254 /*
255  * If have one single callchain root, don't bother printing
256  * its percentage (100 % in fractal mode and the same percentage
257  * than the hist in graph mode). This also avoid one level of column.
258  *
259  * However when percent-limit applied, it's possible that single callchain
260  * node have different (non-100% in fractal mode) percentage.
261  */
262 static bool need_percent_display(struct rb_node *node, u64 parent_samples)
263 {
264 	struct callchain_node *cnode;
265 
266 	if (rb_next(node))
267 		return true;
268 
269 	cnode = rb_entry(node, struct callchain_node, rb_node);
270 	return callchain_cumul_hits(cnode) != parent_samples;
271 }
272 
/*
 * Print the callchain graph attached to one hist entry.
 *
 * When the single root needs no percentage of its own (see
 * need_percent_display()), its entries are printed inline after a
 * "---" connector and the graph proper starts at its children.
 * Returns the number of characters written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				/* Connect the entry to the chain with "|" + "---". */
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;

			if (symbol_conf.inline_name)
				ret += inline__fprintf(chain->ms.map,
						       chain->ip,
						       left_margin,
						       0, 0,
						       fp);
		}
		/* Root already printed inline: descend into its children. */
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}
344 
345 static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
346 					u64 total_samples)
347 {
348 	struct callchain_list *chain;
349 	size_t ret = 0;
350 	char bf[1024];
351 
352 	if (!node)
353 		return 0;
354 
355 	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
356 
357 
358 	list_for_each_entry(chain, &node->val, list) {
359 		if (chain->ip >= PERF_CONTEXT_MAX)
360 			continue;
361 		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
362 					bf, sizeof(bf), false));
363 	}
364 
365 	return ret;
366 }
367 
368 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
369 				      u64 total_samples)
370 {
371 	size_t ret = 0;
372 	u32 entries_printed = 0;
373 	struct callchain_node *chain;
374 	struct rb_node *rb_node = rb_first(tree);
375 
376 	while (rb_node) {
377 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
378 
379 		ret += fprintf(fp, "           ");
380 		ret += callchain_node__fprintf_value(chain, fp, total_samples);
381 		ret += fprintf(fp, "\n");
382 		ret += __callchain__fprintf_flat(fp, chain, total_samples);
383 		ret += fprintf(fp, "\n");
384 		if (++entries_printed == callchain_param.print_limit)
385 			break;
386 
387 		rb_node = rb_next(rb_node);
388 	}
389 
390 	return ret;
391 }
392 
393 static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
394 {
395 	const char *sep = symbol_conf.field_sep ?: ";";
396 	struct callchain_list *chain;
397 	size_t ret = 0;
398 	char bf[1024];
399 	bool first;
400 
401 	if (!node)
402 		return 0;
403 
404 	ret += __callchain__fprintf_folded(fp, node->parent);
405 
406 	first = (ret == 0);
407 	list_for_each_entry(chain, &node->val, list) {
408 		if (chain->ip >= PERF_CONTEXT_MAX)
409 			continue;
410 		ret += fprintf(fp, "%s%s", first ? "" : sep,
411 			       callchain_list__sym_name(chain,
412 						bf, sizeof(bf), false));
413 		first = false;
414 	}
415 
416 	return ret;
417 }
418 
419 static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
420 					u64 total_samples)
421 {
422 	size_t ret = 0;
423 	u32 entries_printed = 0;
424 	struct callchain_node *chain;
425 	struct rb_node *rb_node = rb_first(tree);
426 
427 	while (rb_node) {
428 
429 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
430 
431 		ret += callchain_node__fprintf_value(chain, fp, total_samples);
432 		ret += fprintf(fp, " ");
433 		ret += __callchain__fprintf_folded(fp, chain);
434 		ret += fprintf(fp, "\n");
435 		if (++entries_printed == callchain_param.print_limit)
436 			break;
437 
438 		rb_node = rb_next(rb_node);
439 	}
440 
441 	return ret;
442 }
443 
444 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
445 					    u64 total_samples, int left_margin,
446 					    FILE *fp)
447 {
448 	u64 parent_samples = he->stat.period;
449 
450 	if (symbol_conf.cumulate_callchain)
451 		parent_samples = he->stat_acc->period;
452 
453 	switch (callchain_param.mode) {
454 	case CHAIN_GRAPH_REL:
455 		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
456 						parent_samples, left_margin);
457 		break;
458 	case CHAIN_GRAPH_ABS:
459 		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
460 						parent_samples, left_margin);
461 		break;
462 	case CHAIN_FLAT:
463 		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
464 		break;
465 	case CHAIN_FOLDED:
466 		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
467 		break;
468 	case CHAIN_NONE:
469 		break;
470 	default:
471 		pr_err("Bad callchain mode\n");
472 	}
473 
474 	return 0;
475 }
476 
477 int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
478 			   struct perf_hpp_list *hpp_list)
479 {
480 	const char *sep = symbol_conf.field_sep;
481 	struct perf_hpp_fmt *fmt;
482 	char *start = hpp->buf;
483 	int ret;
484 	bool first = true;
485 
486 	if (symbol_conf.exclude_other && !he->parent)
487 		return 0;
488 
489 	perf_hpp_list__for_each_format(hpp_list, fmt) {
490 		if (perf_hpp__should_skip(fmt, he->hists))
491 			continue;
492 
493 		/*
494 		 * If there's no field_sep, we still need
495 		 * to display initial '  '.
496 		 */
497 		if (!sep || !first) {
498 			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
499 			advance_hpp(hpp, ret);
500 		} else
501 			first = false;
502 
503 		if (perf_hpp__use_color() && fmt->color)
504 			ret = fmt->color(fmt, hpp, he);
505 		else
506 			ret = fmt->entry(fmt, hpp, he);
507 
508 		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
509 		advance_hpp(hpp, ret);
510 	}
511 
512 	return hpp->buf - start;
513 }
514 
/* Format @he using the default column list of its hists. */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}
519 
520 static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
521 					 struct perf_hpp *hpp,
522 					 struct hists *hists,
523 					 FILE *fp)
524 {
525 	const char *sep = symbol_conf.field_sep;
526 	struct perf_hpp_fmt *fmt;
527 	struct perf_hpp_list_node *fmt_node;
528 	char *buf = hpp->buf;
529 	size_t size = hpp->size;
530 	int ret, printed = 0;
531 	bool first = true;
532 
533 	if (symbol_conf.exclude_other && !he->parent)
534 		return 0;
535 
536 	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
537 	advance_hpp(hpp, ret);
538 
539 	/* the first hpp_list_node is for overhead columns */
540 	fmt_node = list_first_entry(&hists->hpp_formats,
541 				    struct perf_hpp_list_node, list);
542 	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
543 		/*
544 		 * If there's no field_sep, we still need
545 		 * to display initial '  '.
546 		 */
547 		if (!sep || !first) {
548 			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
549 			advance_hpp(hpp, ret);
550 		} else
551 			first = false;
552 
553 		if (perf_hpp__use_color() && fmt->color)
554 			ret = fmt->color(fmt, hpp, he);
555 		else
556 			ret = fmt->entry(fmt, hpp, he);
557 
558 		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
559 		advance_hpp(hpp, ret);
560 	}
561 
562 	if (!sep)
563 		ret = scnprintf(hpp->buf, hpp->size, "%*s",
564 				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
565 	advance_hpp(hpp, ret);
566 
567 	printed += fprintf(fp, "%s", buf);
568 
569 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
570 		hpp->buf  = buf;
571 		hpp->size = size;
572 
573 		/*
574 		 * No need to call hist_entry__snprintf_alignment() since this
575 		 * fmt is always the last column in the hierarchy mode.
576 		 */
577 		if (perf_hpp__use_color() && fmt->color)
578 			fmt->color(fmt, hpp, he);
579 		else
580 			fmt->entry(fmt, hpp, he);
581 
582 		/*
583 		 * dynamic entries are right-aligned but we want left-aligned
584 		 * in the hierarchy mode
585 		 */
586 		printed += fprintf(fp, "%s%s", sep ?: "  ", ltrim(buf));
587 	}
588 	printed += putc('\n', fp);
589 
590 	if (symbol_conf.use_callchain && he->leaf) {
591 		u64 total = hists__total_period(hists);
592 
593 		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
594 		goto out;
595 	}
596 
597 out:
598 	return printed;
599 }
600 
601 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
602 			       char *bf, size_t bfsz, FILE *fp,
603 			       bool use_callchain)
604 {
605 	int ret;
606 	int callchain_ret = 0;
607 	int inline_ret = 0;
608 	struct perf_hpp hpp = {
609 		.buf		= bf,
610 		.size		= size,
611 	};
612 	struct hists *hists = he->hists;
613 	u64 total_period = hists->stats.total_period;
614 
615 	if (size == 0 || size > bfsz)
616 		size = hpp.size = bfsz;
617 
618 	if (symbol_conf.report_hierarchy)
619 		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
620 
621 	hist_entry__snprintf(he, &hpp);
622 
623 	ret = fprintf(fp, "%s\n", bf);
624 
625 	if (use_callchain)
626 		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
627 							      0, fp);
628 
629 	if (callchain_ret == 0 && symbol_conf.inline_name) {
630 		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
631 		ret += inline_ret;
632 		if (inline_ret > 0)
633 			ret += fprintf(fp, "\n");
634 	} else
635 		ret += callchain_ret;
636 
637 	return ret;
638 }
639 
640 static int print_hierarchy_indent(const char *sep, int indent,
641 				  const char *line, FILE *fp)
642 {
643 	if (sep != NULL || indent < 2)
644 		return 0;
645 
646 	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
647 }
648 
/*
 * Print the column headers for hierarchy mode: one line of overhead
 * headers followed by the sort keys combined with " / ", then one
 * line of dots underlining them.  Returns the number of header lines
 * written (always 2).
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			/* multiple keys of one level are joined with '+' */
			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	/*
	 * Size the sort-key underline: the widest level (each deeper
	 * level indented by HIERARCHY_INDENT) wins.
	 */
	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;  /* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}
744 
745 static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
746 			 int line, FILE *fp)
747 {
748 	struct perf_hpp_fmt *fmt;
749 	const char *sep = symbol_conf.field_sep;
750 	bool first = true;
751 	int span = 0;
752 
753 	hists__for_each_format(hists, fmt) {
754 		if (perf_hpp__should_skip(fmt, hists))
755 			continue;
756 
757 		if (!first && !span)
758 			fprintf(fp, "%s", sep ?: "  ");
759 		else
760 			first = false;
761 
762 		fmt->header(fmt, hpp, hists, line, &span);
763 
764 		if (!span)
765 			fprintf(fp, "%s", hpp->buf);
766 	}
767 }
768 
769 static int
770 hists__fprintf_standard_headers(struct hists *hists,
771 				struct perf_hpp *hpp,
772 				FILE *fp)
773 {
774 	struct perf_hpp_list *hpp_list = hists->hpp_list;
775 	struct perf_hpp_fmt *fmt;
776 	unsigned int width;
777 	const char *sep = symbol_conf.field_sep;
778 	bool first = true;
779 	int line;
780 
781 	for (line = 0; line < hpp_list->nr_header_lines; line++) {
782 		/* first # is displayed one level up */
783 		if (line)
784 			fprintf(fp, "# ");
785 		fprintf_line(hists, hpp, line, fp);
786 		fprintf(fp, "\n");
787 	}
788 
789 	if (sep)
790 		return hpp_list->nr_header_lines;
791 
792 	first = true;
793 
794 	fprintf(fp, "# ");
795 
796 	hists__for_each_format(hists, fmt) {
797 		unsigned int i;
798 
799 		if (perf_hpp__should_skip(fmt, hists))
800 			continue;
801 
802 		if (!first)
803 			fprintf(fp, "%s", sep ?: "  ");
804 		else
805 			first = false;
806 
807 		width = fmt->width(fmt, hpp, hists);
808 		for (i = 0; i < width; i++)
809 			fprintf(fp, ".");
810 	}
811 
812 	fprintf(fp, "\n");
813 	fprintf(fp, "#\n");
814 	return hpp_list->nr_header_lines + 2;
815 }
816 
817 int hists__fprintf_headers(struct hists *hists, FILE *fp)
818 {
819 	char bf[1024];
820 	struct perf_hpp dummy_hpp = {
821 		.buf	= bf,
822 		.size	= sizeof(bf),
823 	};
824 
825 	fprintf(fp, "# ");
826 
827 	if (symbol_conf.report_hierarchy)
828 		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
829 	else
830 		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
831 
832 }
833 
/*
 * Print every unfiltered entry of @hists whose percentage reaches
 * @min_pcnt, optionally preceded by the column headers.  Output stops
 * once @max_rows lines were written (when @max_rows is non-zero).
 * Returns the number of characters written.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	/* line buffer: column width + extra headroom (presumably "---" + NUL) */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		/* NOTE(review): -1 wraps to SIZE_MAX in the size_t return — confirm callers cope */
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		/* Very verbose mode: dump the thread's maps when the entry has none. */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}
912 
913 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
914 {
915 	int i;
916 	size_t ret = 0;
917 
918 	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
919 		const char *name;
920 
921 		if (stats->nr_events[i] == 0)
922 			continue;
923 
924 		name = perf_event__name(i);
925 		if (!strcmp(name, "UNKNOWN"))
926 			continue;
927 
928 		ret += fprintf(fp, "%16s events: %10d\n", name,
929 			       stats->nr_events[i]);
930 	}
931 
932 	return ret;
933 }
934