/* xref: /linux/tools/perf/ui/hist.c (revision 4246b92cf9fb32da8d8b060c92d8302797c6fbea) */
#include <inttypes.h>
#include <math.h>
#include <linux/compiler.h>

#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"

/* hist period print (hpp) functions */

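/*
 * Call the snprintf-style callback 'fn' and advance hpp->buf/hpp->size
 * by the number of bytes it reports as written, so consecutive calls
 * append to the same output buffer.
 */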
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

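/*
 * Format a single field of a hist entry, either as a percentage of the
 * total period or as a raw value.  For event groups the leader's value
 * is printed first, followed by one value per group member,
 * zero-filling members that have no sample for this entry.
 */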
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill trailing group members which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore the original buf and size as that is where the caller
	 * expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

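/*
 * Wrapper around __hpp__fmt() that picks the column width: a fixed
 * width of 1 when a field separator is in use, otherwise the user or
 * default length minus the room needed for the leading space and, for
 * percentages, the '%' sign.
 */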
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

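/*
 * Same as hpp__fmt() but for the accumulated (cumulative) value of an
 * entry; prints "N/A" when callchain cumulation is disabled.
 */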
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

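/*
 * Compare two entries by the given field.  For event groups the
 * leader's value is compared first; on a tie the group members are
 * compared one by one in group index order.
 */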
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

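/*
 * Compare two entries by their accumulated value.  On a tie, entries
 * from the same thread are ordered by callchain depth so that callers
 * and callees line up according to callchain_param.order.
 */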
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct perf_evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

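/*
 * snprintf-style callbacks used by the format macros below.
 * hpp_color_scnprintf() expects the field width (int) and the
 * percentage (double) as variadic arguments and colors the output
 * based on the percentage; both callbacks clamp their return value to
 * the remaining buffer size.
 */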
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

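/*
 * The macros below generate, per column, a field getter plus the
 * color/entry formatting and sort callbacks that are wired into
 * perf_hpp__format[] further down.
 */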
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}


#define HPP_PERCENT_FNS(_type, _field)					\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)					\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)				\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)					\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)

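/*
 * Instantiate the callbacks for each builtin column; e.g.
 * HPP_PERCENT_FNS(overhead, period) expands to he_get_period(),
 * hpp__color_overhead(), hpp__entry_overhead() and hpp__sort_overhead().
 */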
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

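/*
 * The builtin columns never split or collapse entries on their own, so
 * their ->cmp and ->collapse callbacks are no-ops; ordering is done via
 * the ->sort callbacks generated above.
 */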
static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

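/*
 * Table of the builtin columns.  Entries must stay in the same order as
 * the PERF_HPP__* index enum since the table is indexed by it below
 * (e.g. in perf_hpp__init() and perf_hpp__cancel_cumulate()).
 */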
struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN


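/*
 * Register the default output columns (Overhead, optionally Children,
 * CPU utilization, Samples and Period) unless the user gave an explicit
 * field order.
 */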
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If the user specified a field order, there is no need to set up
	 * the default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
}

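/*
 * Undo the cumulate setup from perf_hpp__init(): drop the "Children"
 * column from the output list and rename the overhead column back to
 * "Overhead".
 */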
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}


static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely unhooked;
	 * if not, it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

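/*
 * Width of the leading overhead columns only, i.e. everything printed
 * before the first sort or dynamic entry.
 */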
unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

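/*
 * Restore the default width of a column; sort entries have their own
 * reset helper and dynamic entries keep whatever width they computed.
 */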
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

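/*
 * Apply user supplied column widths, given as a comma separated list
 * (e.g. "12,10,8"), to the output formats in order.
 */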
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

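/*
 * In hierarchy mode each sort level gets its own perf_hpp_list_node on
 * hists->hpp_formats; duplicate the format into the node for its level,
 * creating the node on first use.  The node stays marked as skipped
 * only if every format on that level should be skipped.
 */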
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

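/*
 * Build the per-hists hierarchy format lists for every evsel when
 * hierarchy reporting (symbol_conf.report_hierarchy) is enabled,
 * skipping dynamic entries that are not defined for a given hists.
 */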
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}