// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../util/thread.h"
#include "../util/util.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

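/*
 * Format the value of a single hist_entry, either as a percentage of
 * the hists' total period or as a raw number.  For group events the
 * values of all group members are appended in group index order,
 * zero-filling members that have no samples.
 */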
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->core.nr_members;

		prev_idx = evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * Zero-fill group members in the middle
				 * which have no samples.
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * Zero-fill the trailing group members which have
			 * no samples.
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore the original buf and size since that is where the
	 * caller expects to find the result.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

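/*
 * Format one column of a hist entry.  The column width is reduced by
 * one for the separating space and by one more for the '%' sign of
 * percentages, unless a field separator is in use, in which case the
 * width does not matter.
 */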
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return  __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

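/*
 * Gather the field values of all pair entries of @a and @b into two
 * arrays indexed by group index so that group members can be compared
 * position by position.
 */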
static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;
	return 0;
out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}

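/*
 * Compare two hist entries on the group member selected via
 * symbol_conf.group_sort_idx, using the remaining group members to
 * break ties.
 */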
static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

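/*
 * Default comparison: sort on the group leader's value and only look
 * at the other group members, in index order, to break ties.  When a
 * group sort index is set, delegate to __hpp__group_sort_idx().
 */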
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
		    (b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
		    !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->core.nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

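/*
 * snprintf-style helpers matching hpp_snprint_fn.  The color variant
 * expects the field width and a percentage as variadic arguments and
 * colors the output according to the percentage value; the plain
 * variant is a simple vsnprintf() wrapper.
 */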
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

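/*
 * Macro factory for the standard hpp columns: each HPP_*_FNS()
 * invocation below generates the field accessor plus the color, entry
 * and sort callbacks for one column.
 */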
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}


#define HPP_PERCENT_FNS(_type, _field)					\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)					\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)				\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)					\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

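/* Operations wired up for each built-in column in perf_hpp__format[]. */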
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point the fmt should be completely
	 * unhooked; if it is not, that is a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

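/*
 * Set up the default output columns: overhead (renamed to "Self" and
 * complemented by the accumulated "Children" column when callchain
 * accumulation is enabled), plus the optional CPU utilization, sample
 * count and period columns.
 */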
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If the user specified a field order, there is no need to set up
	 * the default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}

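/*
 * Undo the effect of callchain accumulation on the output columns:
 * drop the accumulated column again and restore the plain "Overhead"
 * header.
 */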
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}


void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

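/*
 * Width of the leading overhead columns only, i.e. everything printed
 * before the first sort or dynamic entry.
 */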
unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

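/* Reset the width of @fmt to its default value. */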
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

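/*
 * Apply a comma separated list of user supplied column widths to the
 * output formats, in order.
 */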
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

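/*
 * Add a copy of @fmt to the per-hists hpp list for its hierarchy
 * level, creating the level node on first use.
 */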
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

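/*
 * In hierarchy mode every evsel's hists gets its own copy of the sort
 * formats, grouped by hierarchy level.
 */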
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}