xref: /linux/tools/perf/util/hist.c (revision c1a604dff486399ae0be95e6396e0158df95ad5d)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "callchain.h"
3 #include "build-id.h"
4 #include "hist.h"
5 #include "map.h"
6 #include "session.h"
7 #include "namespaces.h"
8 #include "sort.h"
9 #include "units.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "annotate.h"
13 #include "srcline.h"
14 #include "symbol.h"
15 #include "thread.h"
16 #include "ui/progress.h"
17 #include <errno.h>
18 #include <math.h>
19 #include <inttypes.h>
20 #include <sys/param.h>
21 #include <linux/time64.h>
22 #include <linux/zalloc.h>
23 
24 static bool hists__filter_entry_by_dso(struct hists *hists,
25 				       struct hist_entry *he);
26 static bool hists__filter_entry_by_thread(struct hists *hists,
27 					  struct hist_entry *he);
28 static bool hists__filter_entry_by_symbol(struct hists *hists,
29 					  struct hist_entry *he);
30 static bool hists__filter_entry_by_socket(struct hists *hists,
31 					  struct hist_entry *he);
32 
33 u16 hists__col_len(struct hists *hists, enum hist_column col)
34 {
35 	return hists->col_len[col];
36 }
37 
38 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
39 {
40 	hists->col_len[col] = len;
41 }
42 
43 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
44 {
45 	if (len > hists__col_len(hists, col)) {
46 		hists__set_col_len(hists, col, len);
47 		return true;
48 	}
49 	return false;
50 }
51 
52 void hists__reset_col_len(struct hists *hists)
53 {
54 	enum hist_column col;
55 
56 	for (col = 0; col < HISTC_NR_COLS; ++col)
57 		hists__set_col_len(hists, col, 0);
58 }
59 
60 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
61 {
62 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
63 
64 	if (hists__col_len(hists, dso) < unresolved_col_width &&
65 	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
66 	    !symbol_conf.dso_list)
67 		hists__set_col_len(hists, dso, unresolved_col_width);
68 }
69 
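/*
 * Illustrative sizing, assuming a 64-bit host: BITS_PER_LONG / 4 == 16 is
 * the number of hex digits in an address, so an unresolved symbol column
 * reserves 16 + 4 + 2 == 22 characters -- 16 hex digits, "[x] " priv
 * level info, and the "0x" prefix (see the comment below).
 */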
70 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
71 {
72 	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
73 	int symlen;
74 	u16 len;
75 
76 	/*
77 	 * +4 accounts for '[x] ' priv level info
78 	 * +2 accounts for 0x prefix on raw addresses
79 	 * +3 accounts for ' y ' symtab origin info
80 	 */
81 	if (h->ms.sym) {
82 		symlen = h->ms.sym->namelen + 4;
83 		if (verbose > 0)
84 			symlen += BITS_PER_LONG / 4 + 2 + 3;
85 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
86 	} else {
87 		symlen = unresolved_col_width + 4 + 2;
88 		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
89 		hists__set_unres_dso_col_len(hists, HISTC_DSO);
90 	}
91 
92 	len = thread__comm_len(h->thread);
93 	if (hists__new_col_len(hists, HISTC_COMM, len))
94 		hists__set_col_len(hists, HISTC_THREAD, len + 8);
95 
96 	if (h->ms.map) {
97 		len = dso__name_len(h->ms.map->dso);
98 		hists__new_col_len(hists, HISTC_DSO, len);
99 	}
100 
101 	if (h->parent)
102 		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
103 
104 	if (h->branch_info) {
105 		if (h->branch_info->from.sym) {
106 			symlen = (int)h->branch_info->from.sym->namelen + 4;
107 			if (verbose > 0)
108 				symlen += BITS_PER_LONG / 4 + 2 + 3;
109 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
110 
111 			symlen = dso__name_len(h->branch_info->from.map->dso);
112 			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
113 		} else {
114 			symlen = unresolved_col_width + 4 + 2;
115 			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
116 			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
117 		}
118 
119 		if (h->branch_info->to.sym) {
120 			symlen = (int)h->branch_info->to.sym->namelen + 4;
121 			if (verbose > 0)
122 				symlen += BITS_PER_LONG / 4 + 2 + 3;
123 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
124 
125 			symlen = dso__name_len(h->branch_info->to.map->dso);
126 			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
127 		} else {
128 			symlen = unresolved_col_width + 4 + 2;
129 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
130 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
131 		}
132 
133 		if (h->branch_info->srcline_from)
134 			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
135 					strlen(h->branch_info->srcline_from));
136 		if (h->branch_info->srcline_to)
137 			hists__new_col_len(hists, HISTC_SRCLINE_TO,
138 					strlen(h->branch_info->srcline_to));
139 	}
140 
141 	if (h->mem_info) {
142 		if (h->mem_info->daddr.sym) {
143 			symlen = (int)h->mem_info->daddr.sym->namelen + 4
144 			       + unresolved_col_width + 2;
145 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
146 					   symlen);
147 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
148 					   symlen + 1);
149 		} else {
150 			symlen = unresolved_col_width + 4 + 2;
151 			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
152 					   symlen);
153 			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
154 					   symlen);
155 		}
156 
157 		if (h->mem_info->iaddr.sym) {
158 			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
159 			       + unresolved_col_width + 2;
160 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
161 					   symlen);
162 		} else {
163 			symlen = unresolved_col_width + 4 + 2;
164 			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
165 					   symlen);
166 		}
167 
168 		if (h->mem_info->daddr.map) {
169 			symlen = dso__name_len(h->mem_info->daddr.map->dso);
170 			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
171 					   symlen);
172 		} else {
173 			symlen = unresolved_col_width + 4 + 2;
174 			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
175 		}
176 
177 		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
178 				   unresolved_col_width + 4 + 2);
179 
180 	} else {
181 		symlen = unresolved_col_width + 4 + 2;
182 		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
183 		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
184 		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
185 	}
186 
187 	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
188 	hists__new_col_len(hists, HISTC_CPU, 3);
189 	hists__new_col_len(hists, HISTC_SOCKET, 6);
190 	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
191 	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
192 	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
193 	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
194 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
195 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
196 	if (symbol_conf.nanosecs)
197 		hists__new_col_len(hists, HISTC_TIME, 16);
198 	else
199 		hists__new_col_len(hists, HISTC_TIME, 12);
200 
201 	if (h->srcline) {
202 		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
203 		hists__new_col_len(hists, HISTC_SRCLINE, len);
204 	}
205 
206 	if (h->srcfile)
207 		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
208 
209 	if (h->transaction)
210 		hists__new_col_len(hists, HISTC_TRANSACTION,
211 				   hist_entry__transaction_len());
212 
213 	if (h->trace_output)
214 		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
215 }
216 
217 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
218 {
219 	struct rb_node *next = rb_first_cached(&hists->entries);
220 	struct hist_entry *n;
221 	int row = 0;
222 
223 	hists__reset_col_len(hists);
224 
225 	while (next && row++ < max_rows) {
226 		n = rb_entry(next, struct hist_entry, rb_node);
227 		if (!n->filtered)
228 			hists__calc_col_len(hists, n);
229 		next = rb_next(&n->rb_node);
230 	}
231 }
232 
233 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
234 					unsigned int cpumode, u64 period)
235 {
236 	switch (cpumode) {
237 	case PERF_RECORD_MISC_KERNEL:
238 		he_stat->period_sys += period;
239 		break;
240 	case PERF_RECORD_MISC_USER:
241 		he_stat->period_us += period;
242 		break;
243 	case PERF_RECORD_MISC_GUEST_KERNEL:
244 		he_stat->period_guest_sys += period;
245 		break;
246 	case PERF_RECORD_MISC_GUEST_USER:
247 		he_stat->period_guest_us += period;
248 		break;
249 	default:
250 		break;
251 	}
252 }
253 
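/*
 * Quantize a timestamp down to a symbol_conf.time_quantum boundary so
 * that nearby samples can share one histogram entry.  Worked example
 * (illustrative values): with time_quantum == 100 * NSEC_PER_MSEC,
 * htime == 1234567890 ns falls into the 1200000000 ns bucket.
 */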
254 static long hist_time(unsigned long htime)
255 {
256 	unsigned long time_quantum = symbol_conf.time_quantum;
257 	if (time_quantum)
258 		return (htime / time_quantum) * time_quantum;
259 	return htime;
260 }
261 
262 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
263 				u64 weight)
264 {
265 
266 	he_stat->period		+= period;
267 	he_stat->weight		+= weight;
268 	he_stat->nr_events	+= 1;
269 }
270 
271 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
272 {
273 	dest->period		+= src->period;
274 	dest->period_sys	+= src->period_sys;
275 	dest->period_us		+= src->period_us;
276 	dest->period_guest_sys	+= src->period_guest_sys;
277 	dest->period_guest_us	+= src->period_guest_us;
278 	dest->nr_events		+= src->nr_events;
279 	dest->weight		+= src->weight;
280 }
281 
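/*
 * Each decay round scales the period by 7/8 (~0.875).  For instance, an
 * entry that receives no new samples retains roughly (7/8)^8 ~= 34% of
 * its period after eight rounds; once the period reaches zero the entry
 * is deleted by the callers below.
 */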
282 static void he_stat__decay(struct he_stat *he_stat)
283 {
284 	he_stat->period = (he_stat->period * 7) / 8;
285 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
286 	/* XXX need decay for weight too? */
287 }
288 
289 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
290 
291 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
292 {
293 	u64 prev_period = he->stat.period;
294 	u64 diff;
295 
296 	if (prev_period == 0)
297 		return true;
298 
299 	he_stat__decay(&he->stat);
300 	if (symbol_conf.cumulate_callchain)
301 		he_stat__decay(he->stat_acc);
302 	decay_callchain(he->callchain);
303 
304 	diff = prev_period - he->stat.period;
305 
306 	if (!he->depth) {
307 		hists->stats.total_period -= diff;
308 		if (!he->filtered)
309 			hists->stats.total_non_filtered_period -= diff;
310 	}
311 
312 	if (!he->leaf) {
313 		struct hist_entry *child;
314 		struct rb_node *node = rb_first_cached(&he->hroot_out);
315 		while (node) {
316 			child = rb_entry(node, struct hist_entry, rb_node);
317 			node = rb_next(node);
318 
319 			if (hists__decay_entry(hists, child))
320 				hists__delete_entry(hists, child);
321 		}
322 	}
323 
324 	return he->stat.period == 0;
325 }
326 
327 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
328 {
329 	struct rb_root_cached *root_in;
330 	struct rb_root_cached *root_out;
331 
332 	if (he->parent_he) {
333 		root_in  = &he->parent_he->hroot_in;
334 		root_out = &he->parent_he->hroot_out;
335 	} else {
336 		if (hists__has(hists, need_collapse))
337 			root_in = &hists->entries_collapsed;
338 		else
339 			root_in = hists->entries_in;
340 		root_out = &hists->entries;
341 	}
342 
343 	rb_erase_cached(&he->rb_node_in, root_in);
344 	rb_erase_cached(&he->rb_node, root_out);
345 
346 	--hists->nr_entries;
347 	if (!he->filtered)
348 		--hists->nr_non_filtered_entries;
349 
350 	hist_entry__delete(he);
351 }
352 
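/*
 * he->level is the cpumode character of the sample ('.' for user space,
 * 'k' for kernel, etc.), so zap_user removes user-space entries and
 * zap_kernel removes everything else, in addition to entries whose
 * period has decayed to zero.
 */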
353 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
354 {
355 	struct rb_node *next = rb_first_cached(&hists->entries);
356 	struct hist_entry *n;
357 
358 	while (next) {
359 		n = rb_entry(next, struct hist_entry, rb_node);
360 		next = rb_next(&n->rb_node);
361 		if (((zap_user && n->level == '.') ||
362 		     (zap_kernel && n->level != '.') ||
363 		     hists__decay_entry(hists, n))) {
364 			hists__delete_entry(hists, n);
365 		}
366 	}
367 }
368 
369 void hists__delete_entries(struct hists *hists)
370 {
371 	struct rb_node *next = rb_first_cached(&hists->entries);
372 	struct hist_entry *n;
373 
374 	while (next) {
375 		n = rb_entry(next, struct hist_entry, rb_node);
376 		next = rb_next(&n->rb_node);
377 
378 		hists__delete_entry(hists, n);
379 	}
380 }
381 
382 struct hist_entry *hists__get_entry(struct hists *hists, int idx)
383 {
384 	struct rb_node *next = rb_first_cached(&hists->entries);
385 	struct hist_entry *n;
386 	int i = 0;
387 
388 	while (next) {
389 		n = rb_entry(next, struct hist_entry, rb_node);
390 		if (i == idx)
391 			return n;
392 
393 		next = rb_next(&n->rb_node);
394 		i++;
395 	}
396 
397 	return NULL;
398 }
399 
400 /*
401  * histogram, sorted on item, collects periods
402  */
403 
404 static int hist_entry__init(struct hist_entry *he,
405 			    struct hist_entry *template,
406 			    bool sample_self,
407 			    size_t callchain_size)
408 {
409 	*he = *template;
410 	he->callchain_size = callchain_size;
411 
412 	if (symbol_conf.cumulate_callchain) {
413 		he->stat_acc = malloc(sizeof(he->stat));
414 		if (he->stat_acc == NULL)
415 			return -ENOMEM;
416 		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
417 		if (!sample_self)
418 			memset(&he->stat, 0, sizeof(he->stat));
419 	}
420 
421 	map__get(he->ms.map);
422 
423 	if (he->branch_info) {
424 		/*
425 		 * This branch info is (part of an array) allocated by
426 		 * sample__resolve_bstack() and will be freed after
427 		 * adding new entries.  So we need to save a copy.
428 		 */
429 		he->branch_info = malloc(sizeof(*he->branch_info));
430 		if (he->branch_info == NULL)
431 			goto err;
432 
433 		memcpy(he->branch_info, template->branch_info,
434 		       sizeof(*he->branch_info));
435 
436 		map__get(he->branch_info->from.map);
437 		map__get(he->branch_info->to.map);
438 	}
439 
440 	if (he->mem_info) {
441 		map__get(he->mem_info->iaddr.map);
442 		map__get(he->mem_info->daddr.map);
443 	}
444 
445 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
446 		callchain_init(he->callchain);
447 
448 	if (he->raw_data) {
449 		he->raw_data = memdup(he->raw_data, he->raw_size);
450 		if (he->raw_data == NULL)
451 			goto err_infos;
452 	}
453 
454 	if (he->srcline) {
455 		he->srcline = strdup(he->srcline);
456 		if (he->srcline == NULL)
457 			goto err_rawdata;
458 	}
459 
460 	if (symbol_conf.res_sample) {
461 		he->res_samples = calloc(symbol_conf.res_sample,
462 					 sizeof(struct res_sample));
463 		if (!he->res_samples)
464 			goto err_srcline;
465 	}
466 
467 	INIT_LIST_HEAD(&he->pairs.node);
468 	thread__get(he->thread);
469 	he->hroot_in  = RB_ROOT_CACHED;
470 	he->hroot_out = RB_ROOT_CACHED;
471 
472 	if (!symbol_conf.report_hierarchy)
473 		he->leaf = true;
474 
475 	return 0;
476 
477 err_srcline:
478 	zfree(&he->srcline);
479 
480 err_rawdata:
481 	zfree(&he->raw_data);
482 
483 err_infos:
484 	if (he->branch_info) {
485 		map__put(he->branch_info->from.map);
486 		map__put(he->branch_info->to.map);
487 		zfree(&he->branch_info);
488 	}
489 	if (he->mem_info) {
490 		map__put(he->mem_info->iaddr.map);
491 		map__put(he->mem_info->daddr.map);
492 	}
493 err:
494 	map__zput(he->ms.map);
495 	zfree(&he->stat_acc);
496 	return -ENOMEM;
497 }
498 
499 static void *hist_entry__zalloc(size_t size)
500 {
501 	return zalloc(size + sizeof(struct hist_entry));
502 }
503 
504 static void hist_entry__free(void *ptr)
505 {
506 	free(ptr);
507 }
508 
509 static struct hist_entry_ops default_ops = {
510 	.new	= hist_entry__zalloc,
511 	.free	= hist_entry__free,
512 };
513 
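/*
 * struct hist_entry ends with a flexible callchain_root array (see
 * sort.h), so ops->new() allocates the entry and, when callchains are
 * used, its callchain root in one block of
 * sizeof(struct hist_entry) + callchain_size bytes.
 */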
514 static struct hist_entry *hist_entry__new(struct hist_entry *template,
515 					  bool sample_self)
516 {
517 	struct hist_entry_ops *ops = template->ops;
518 	size_t callchain_size = 0;
519 	struct hist_entry *he;
520 	int err = 0;
521 
522 	if (!ops)
523 		ops = template->ops = &default_ops;
524 
525 	if (symbol_conf.use_callchain)
526 		callchain_size = sizeof(struct callchain_root);
527 
528 	he = ops->new(callchain_size);
529 	if (he) {
530 		err = hist_entry__init(he, template, sample_self, callchain_size);
531 		if (err) {
532 			ops->free(he);
533 			he = NULL;
534 		}
535 	}
536 
537 	return he;
538 }
539 
540 static u8 symbol__parent_filter(const struct symbol *parent)
541 {
542 	if (symbol_conf.exclude_other && parent == NULL)
543 		return 1 << HIST_FILTER__PARENT;
544 	return 0;
545 }
546 
547 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
548 {
549 	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
550 		return;
551 
552 	he->hists->callchain_period += period;
553 	if (!he->filtered)
554 		he->hists->callchain_non_filtered_period += period;
555 }
556 
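/*
 * Classic rbtree find-or-insert over hists->entries_in: on a match
 * (hist_entry__cmp() == 0) the period is aggregated into the existing
 * entry; otherwise a new entry is allocated with hist_entry__new() and
 * linked where the search ended.
 */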
557 static struct hist_entry *hists__findnew_entry(struct hists *hists,
558 					       struct hist_entry *entry,
559 					       struct addr_location *al,
560 					       bool sample_self)
561 {
562 	struct rb_node **p;
563 	struct rb_node *parent = NULL;
564 	struct hist_entry *he;
565 	int64_t cmp;
566 	u64 period = entry->stat.period;
567 	u64 weight = entry->stat.weight;
568 	bool leftmost = true;
569 
570 	p = &hists->entries_in->rb_root.rb_node;
571 
572 	while (*p != NULL) {
573 		parent = *p;
574 		he = rb_entry(parent, struct hist_entry, rb_node_in);
575 
576 		/*
577 		 * Make sure that it receives arguments in the same order as
578 		 * hist_entry__collapse() so that we can use an appropriate
579 		 * function when searching an entry regardless which sort
580 		 * keys were used.
581 		 */
582 		cmp = hist_entry__cmp(he, entry);
583 
584 		if (!cmp) {
585 			if (sample_self) {
586 				he_stat__add_period(&he->stat, period, weight);
587 				hist_entry__add_callchain_period(he, period);
588 			}
589 			if (symbol_conf.cumulate_callchain)
590 				he_stat__add_period(he->stat_acc, period, weight);
591 
592 			/*
593 			 * This mem info was allocated from sample__resolve_mem
594 			 * and will not be used anymore.
595 			 */
596 			mem_info__zput(entry->mem_info);
597 
598 			block_info__zput(entry->block_info);
599 
600 			/* If the map of an existing hist_entry has
601 			 * become out-of-date due to an exec() or
602 			 * similar, update it.  Otherwise we will
603 			 * mis-adjust symbol addresses when computing
604 			 * the history counter to increment.
605 			 */
606 			if (he->ms.map != entry->ms.map) {
607 				map__put(he->ms.map);
608 				he->ms.map = map__get(entry->ms.map);
609 			}
610 			goto out;
611 		}
612 
613 		if (cmp < 0)
614 			p = &(*p)->rb_left;
615 		else {
616 			p = &(*p)->rb_right;
617 			leftmost = false;
618 		}
619 	}
620 
621 	he = hist_entry__new(entry, sample_self);
622 	if (!he)
623 		return NULL;
624 
625 	if (sample_self)
626 		hist_entry__add_callchain_period(he, period);
627 	hists->nr_entries++;
628 
629 	rb_link_node(&he->rb_node_in, parent, p);
630 	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
631 out:
632 	if (sample_self)
633 		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
634 	if (symbol_conf.cumulate_callchain)
635 		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
636 	return he;
637 }
638 
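/*
 * Return a uniformly distributed value in [0, high).  The rejection loop
 * avoids modulo bias: thresh == (2^32 - high) % high, so the accepted
 * range is an exact multiple of high.  E.g. for high == 10, thresh == 6;
 * redrawing r < 6 keeps residues 0..5 from coming up slightly more often
 * than 6..9.  (This assumes random() spans the full unsigned range; with
 * the usual [0, RAND_MAX] it is an approximation.)
 */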
639 static unsigned random_max(unsigned high)
640 {
641 	unsigned thresh = -high % high;
642 	for (;;) {
643 		unsigned r = random();
644 		if (r >= thresh)
645 			return r % high;
646 	}
647 }
648 
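
/*
 * Keep up to symbol_conf.res_sample representative samples per entry:
 * store every sample until the array fills, then overwrite a uniformly
 * random slot.  Note this is simpler than classic reservoir sampling
 * (which would replace only with probability k/n) and so favours more
 * recent samples.
 */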
649 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
650 {
651 	struct res_sample *r;
652 	int j;
653 
654 	if (he->num_res < symbol_conf.res_sample) {
655 		j = he->num_res++;
656 	} else {
657 		j = random_max(symbol_conf.res_sample);
658 	}
659 	r = &he->res_samples[j];
660 	r->time = sample->time;
661 	r->cpu = sample->cpu;
662 	r->tid = sample->tid;
663 }
664 
665 static struct hist_entry*
666 __hists__add_entry(struct hists *hists,
667 		   struct addr_location *al,
668 		   struct symbol *sym_parent,
669 		   struct branch_info *bi,
670 		   struct mem_info *mi,
671 		   struct block_info *block_info,
672 		   struct perf_sample *sample,
673 		   bool sample_self,
674 		   struct hist_entry_ops *ops)
675 {
676 	struct namespaces *ns = thread__namespaces(al->thread);
677 	struct hist_entry entry = {
678 		.thread	= al->thread,
679 		.comm = thread__comm(al->thread),
680 		.cgroup_id = {
681 			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
682 			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
683 		},
684 		.ms = {
685 			.map	= al->map,
686 			.sym	= al->sym,
687 		},
688 		.srcline = (char *) al->srcline,
689 		.socket	 = al->socket,
690 		.cpu	 = al->cpu,
691 		.cpumode = al->cpumode,
692 		.ip	 = al->addr,
693 		.level	 = al->level,
694 		.stat = {
695 			.nr_events = 1,
696 			.period	= sample->period,
697 			.weight = sample->weight,
698 		},
699 		.parent = sym_parent,
700 		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
701 		.hists	= hists,
702 		.branch_info = bi,
703 		.mem_info = mi,
704 		.block_info = block_info,
705 		.transaction = sample->transaction,
706 		.raw_data = sample->raw_data,
707 		.raw_size = sample->raw_size,
708 		.ops = ops,
709 		.time = hist_time(sample->time),
710 	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
711 
712 	if (!hists->has_callchains && he && he->callchain_size != 0)
713 		hists->has_callchains = true;
714 	if (he && symbol_conf.res_sample)
715 		hists__res_sample(he, sample);
716 	return he;
717 }
718 
719 struct hist_entry *hists__add_entry(struct hists *hists,
720 				    struct addr_location *al,
721 				    struct symbol *sym_parent,
722 				    struct branch_info *bi,
723 				    struct mem_info *mi,
724 				    struct perf_sample *sample,
725 				    bool sample_self)
726 {
727 	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
728 				  sample, sample_self, NULL);
729 }
730 
731 struct hist_entry *hists__add_entry_ops(struct hists *hists,
732 					struct hist_entry_ops *ops,
733 					struct addr_location *al,
734 					struct symbol *sym_parent,
735 					struct branch_info *bi,
736 					struct mem_info *mi,
737 					struct perf_sample *sample,
738 					bool sample_self)
739 {
740 	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
741 				  sample, sample_self, ops);
742 }
743 
744 struct hist_entry *hists__add_entry_block(struct hists *hists,
745 					  struct addr_location *al,
746 					  struct block_info *block_info)
747 {
748 	struct hist_entry entry = {
749 		.block_info = block_info,
750 		.hists = hists,
751 	}, *he = hists__findnew_entry(hists, &entry, al, false);
752 
753 	return he;
754 }
755 
756 static int
757 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
758 		    struct addr_location *al __maybe_unused)
759 {
760 	return 0;
761 }
762 
763 static int
764 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
765 			struct addr_location *al __maybe_unused)
766 {
767 	return 0;
768 }
769 
770 static int
771 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
772 {
773 	struct perf_sample *sample = iter->sample;
774 	struct mem_info *mi;
775 
776 	mi = sample__resolve_mem(sample, al);
777 	if (mi == NULL)
778 		return -ENOMEM;
779 
780 	iter->priv = mi;
781 	return 0;
782 }
783 
784 static int
785 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
786 {
787 	u64 cost;
788 	struct mem_info *mi = iter->priv;
789 	struct hists *hists = evsel__hists(iter->evsel);
790 	struct perf_sample *sample = iter->sample;
791 	struct hist_entry *he;
792 
793 	if (mi == NULL)
794 		return -EINVAL;
795 
796 	cost = sample->weight;
797 	if (!cost)
798 		cost = 1;
799 
800 	/*
801 	 * We must pass period=weight in order to get the correct
802 	 * sorting from hists__collapse_resort(), which is based solely
803 	 * on periods.  We want sorting to be done on nr_events * weight,
804 	 * and this is achieved indirectly by passing period=weight here
805 	 * and via the he_stat__add_period() function.
806 	 */
807 	sample->period = cost;
808 
809 	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
810 			      sample, true);
811 	if (!he)
812 		return -ENOMEM;
813 
814 	iter->he = he;
815 	return 0;
816 }
817 
818 static int
819 iter_finish_mem_entry(struct hist_entry_iter *iter,
820 		      struct addr_location *al __maybe_unused)
821 {
822 	struct evsel *evsel = iter->evsel;
823 	struct hists *hists = evsel__hists(evsel);
824 	struct hist_entry *he = iter->he;
825 	int err = -EINVAL;
826 
827 	if (he == NULL)
828 		goto out;
829 
830 	hists__inc_nr_samples(hists, he->filtered);
831 
832 	err = hist_entry__append_callchain(he, iter->sample);
833 
834 out:
835 	/*
836 	 * We don't need to free iter->priv (mem_info) here since the mem info
837 	 * was either already freed in hists__findnew_entry() or passed to a
838 	 * new hist entry by hist_entry__new().
839 	 */
840 	iter->priv = NULL;
841 
842 	iter->he = NULL;
843 	return err;
844 }
845 
846 static int
847 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
848 {
849 	struct branch_info *bi;
850 	struct perf_sample *sample = iter->sample;
851 
852 	bi = sample__resolve_bstack(sample, al);
853 	if (!bi)
854 		return -ENOMEM;
855 
856 	iter->curr = 0;
857 	iter->total = sample->branch_stack->nr;
858 
859 	iter->priv = bi;
860 	return 0;
861 }
862 
863 static int
864 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
865 			     struct addr_location *al __maybe_unused)
866 {
867 	return 0;
868 }
869 
870 static int
871 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
872 {
873 	struct branch_info *bi = iter->priv;
874 	int i = iter->curr;
875 
876 	if (bi == NULL)
877 		return 0;
878 
879 	if (iter->curr >= iter->total)
880 		return 0;
881 
882 	al->map = bi[i].to.map;
883 	al->sym = bi[i].to.sym;
884 	al->addr = bi[i].to.addr;
885 	return 1;
886 }
887 
888 static int
889 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
890 {
891 	struct branch_info *bi;
892 	struct evsel *evsel = iter->evsel;
893 	struct hists *hists = evsel__hists(evsel);
894 	struct perf_sample *sample = iter->sample;
895 	struct hist_entry *he = NULL;
896 	int i = iter->curr;
897 	int err = 0;
898 
899 	bi = iter->priv;
900 
901 	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
902 		goto out;
903 
904 	/*
905 	 * The report shows the percentage of total branches captured
906 	 * and not events sampled. Thus we use a pseudo period of 1.
907 	 */
908 	sample->period = 1;
909 	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
910 
911 	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
912 			      sample, true);
913 	if (he == NULL)
914 		return -ENOMEM;
915 
916 	hists__inc_nr_samples(hists, he->filtered);
917 
918 out:
919 	iter->he = he;
920 	iter->curr++;
921 	return err;
922 }
923 
924 static int
925 iter_finish_branch_entry(struct hist_entry_iter *iter,
926 			 struct addr_location *al __maybe_unused)
927 {
928 	zfree(&iter->priv);
929 	iter->he = NULL;
930 
931 	return iter->curr >= iter->total ? 0 : -1;
932 }
933 
934 static int
935 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
936 			  struct addr_location *al __maybe_unused)
937 {
938 	return 0;
939 }
940 
941 static int
942 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
943 {
944 	struct evsel *evsel = iter->evsel;
945 	struct perf_sample *sample = iter->sample;
946 	struct hist_entry *he;
947 
948 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
949 			      sample, true);
950 	if (he == NULL)
951 		return -ENOMEM;
952 
953 	iter->he = he;
954 	return 0;
955 }
956 
957 static int
958 iter_finish_normal_entry(struct hist_entry_iter *iter,
959 			 struct addr_location *al __maybe_unused)
960 {
961 	struct hist_entry *he = iter->he;
962 	struct evsel *evsel = iter->evsel;
963 	struct perf_sample *sample = iter->sample;
964 
965 	if (he == NULL)
966 		return 0;
967 
968 	iter->he = NULL;
969 
970 	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
971 
972 	return hist_entry__append_callchain(he, sample);
973 }
974 
975 static int
976 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
977 			      struct addr_location *al __maybe_unused)
978 {
979 	struct hist_entry **he_cache;
980 
981 	callchain_cursor_commit(&callchain_cursor);
982 
983 	/*
984 	 * This is for detecting cycles or recursion so that they're
985 	 * accumulated only once, preventing entries from exceeding
986 	 * 100% overhead.
987 	 */
988 	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
989 	if (he_cache == NULL)
990 		return -ENOMEM;
991 
992 	iter->priv = he_cache;
993 	iter->curr = 0;
994 
995 	return 0;
996 }
997 
998 static int
999 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1000 				 struct addr_location *al)
1001 {
1002 	struct evsel *evsel = iter->evsel;
1003 	struct hists *hists = evsel__hists(evsel);
1004 	struct perf_sample *sample = iter->sample;
1005 	struct hist_entry **he_cache = iter->priv;
1006 	struct hist_entry *he;
1007 	int err = 0;
1008 
1009 	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
1010 			      sample, true);
1011 	if (he == NULL)
1012 		return -ENOMEM;
1013 
1014 	iter->he = he;
1015 	he_cache[iter->curr++] = he;
1016 
1017 	hist_entry__append_callchain(he, sample);
1018 
1019 	/*
1020 	 * We need to re-initialize the cursor since callchain_append()
1021 	 * advanced the cursor to the end.
1022 	 */
1023 	callchain_cursor_commit(&callchain_cursor);
1024 
1025 	hists__inc_nr_samples(hists, he->filtered);
1026 
1027 	return err;
1028 }
1029 
1030 static int
1031 iter_next_cumulative_entry(struct hist_entry_iter *iter,
1032 			   struct addr_location *al)
1033 {
1034 	struct callchain_cursor_node *node;
1035 
1036 	node = callchain_cursor_current(&callchain_cursor);
1037 	if (node == NULL)
1038 		return 0;
1039 
1040 	return fill_callchain_info(al, node, iter->hide_unresolved);
1041 }
1042 
1043 static int
1044 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1045 			       struct addr_location *al)
1046 {
1047 	struct evsel *evsel = iter->evsel;
1048 	struct perf_sample *sample = iter->sample;
1049 	struct hist_entry **he_cache = iter->priv;
1050 	struct hist_entry *he;
1051 	struct hist_entry he_tmp = {
1052 		.hists = evsel__hists(evsel),
1053 		.cpu = al->cpu,
1054 		.thread = al->thread,
1055 		.comm = thread__comm(al->thread),
1056 		.ip = al->addr,
1057 		.ms = {
1058 			.map = al->map,
1059 			.sym = al->sym,
1060 		},
1061 		.srcline = (char *) al->srcline,
1062 		.parent = iter->parent,
1063 		.raw_data = sample->raw_data,
1064 		.raw_size = sample->raw_size,
1065 	};
1066 	int i;
1067 	struct callchain_cursor cursor;
1068 
1069 	callchain_cursor_snapshot(&cursor, &callchain_cursor);
1070 
1071 	callchain_cursor_advance(&callchain_cursor);
1072 
1073 	/*
1074 	 * Check if there are duplicate entries in the callchain.
1075 	 * It's possible that it has cycles or recursive calls.
1076 	 */
1077 	for (i = 0; i < iter->curr; i++) {
1078 		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1079 			/* to avoid calling callback function */
1080 			/* to avoid calling the callback function */
1081 			return 0;
1082 		}
1083 	}
1084 
1085 	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1086 			      sample, false);
1087 	if (he == NULL)
1088 		return -ENOMEM;
1089 
1090 	iter->he = he;
1091 	he_cache[iter->curr++] = he;
1092 
1093 	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
1094 		callchain_append(he->callchain, &cursor, sample->period);
1095 	return 0;
1096 }
1097 
1098 static int
1099 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1100 			     struct addr_location *al __maybe_unused)
1101 {
1102 	zfree(&iter->priv);
1103 	iter->he = NULL;
1104 
1105 	return 0;
1106 }
1107 
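/*
 * The iterator ops below are driven by hist_entry_iter__add(), which
 * invokes them in a fixed order:
 *
 *   prepare_entry()
 *   add_single_entry()
 *   while (next_entry())
 *           add_next_entry()
 *   finish_entry()
 *
 * The mem and normal iterators add one entry per sample; the branch
 * iterator adds one per branch stack element, and the cumulative
 * iterator one per callchain node.
 */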
1108 const struct hist_iter_ops hist_iter_mem = {
1109 	.prepare_entry 		= iter_prepare_mem_entry,
1110 	.add_single_entry 	= iter_add_single_mem_entry,
1111 	.next_entry 		= iter_next_nop_entry,
1112 	.add_next_entry 	= iter_add_next_nop_entry,
1113 	.finish_entry 		= iter_finish_mem_entry,
1114 };
1115 
1116 const struct hist_iter_ops hist_iter_branch = {
1117 	.prepare_entry 		= iter_prepare_branch_entry,
1118 	.add_single_entry 	= iter_add_single_branch_entry,
1119 	.next_entry 		= iter_next_branch_entry,
1120 	.add_next_entry 	= iter_add_next_branch_entry,
1121 	.finish_entry 		= iter_finish_branch_entry,
1122 };
1123 
1124 const struct hist_iter_ops hist_iter_normal = {
1125 	.prepare_entry 		= iter_prepare_normal_entry,
1126 	.add_single_entry 	= iter_add_single_normal_entry,
1127 	.next_entry 		= iter_next_nop_entry,
1128 	.add_next_entry 	= iter_add_next_nop_entry,
1129 	.finish_entry 		= iter_finish_normal_entry,
1130 };
1131 
1132 const struct hist_iter_ops hist_iter_cumulative = {
1133 	.prepare_entry 		= iter_prepare_cumulative_entry,
1134 	.add_single_entry 	= iter_add_single_cumulative_entry,
1135 	.next_entry 		= iter_next_cumulative_entry,
1136 	.add_next_entry 	= iter_add_next_cumulative_entry,
1137 	.finish_entry 		= iter_finish_cumulative_entry,
1138 };
1139 
1140 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1141 			 int max_stack_depth, void *arg)
1142 {
1143 	int err, err2;
1144 	struct map *alm = NULL;
1145 
1146 	if (al)
1147 		alm = map__get(al->map);
1148 
1149 	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1150 					iter->evsel, al, max_stack_depth);
1151 	if (err) {
1152 		map__put(alm);
1153 		return err;
1154 	}
1155 
1156 	err = iter->ops->prepare_entry(iter, al);
1157 	if (err)
1158 		goto out;
1159 
1160 	err = iter->ops->add_single_entry(iter, al);
1161 	if (err)
1162 		goto out;
1163 
1164 	if (iter->he && iter->add_entry_cb) {
1165 		err = iter->add_entry_cb(iter, al, true, arg);
1166 		if (err)
1167 			goto out;
1168 	}
1169 
1170 	while (iter->ops->next_entry(iter, al)) {
1171 		err = iter->ops->add_next_entry(iter, al);
1172 		if (err)
1173 			break;
1174 
1175 		if (iter->he && iter->add_entry_cb) {
1176 			err = iter->add_entry_cb(iter, al, false, arg);
1177 			if (err)
1178 				goto out;
1179 		}
1180 	}
1181 
1182 out:
1183 	err2 = iter->ops->finish_entry(iter, al);
1184 	if (!err)
1185 		err = err2;
1186 
1187 	map__put(alm);
1188 
1189 	return err;
1190 }
1191 
1192 int64_t
1193 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1194 {
1195 	struct hists *hists = left->hists;
1196 	struct perf_hpp_fmt *fmt;
1197 	int64_t cmp = 0;
1198 
1199 	hists__for_each_sort_list(hists, fmt) {
1200 		if (perf_hpp__is_dynamic_entry(fmt) &&
1201 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1202 			continue;
1203 
1204 		cmp = fmt->cmp(fmt, left, right);
1205 		if (cmp)
1206 			break;
1207 	}
1208 
1209 	return cmp;
1210 }
1211 
1212 int64_t
1213 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1214 {
1215 	struct hists *hists = left->hists;
1216 	struct perf_hpp_fmt *fmt;
1217 	int64_t cmp = 0;
1218 
1219 	hists__for_each_sort_list(hists, fmt) {
1220 		if (perf_hpp__is_dynamic_entry(fmt) &&
1221 		    !perf_hpp__defined_dynamic_entry(fmt, hists))
1222 			continue;
1223 
1224 		cmp = fmt->collapse(fmt, left, right);
1225 		if (cmp)
1226 			break;
1227 	}
1228 
1229 	return cmp;
1230 }
1231 
1232 void hist_entry__delete(struct hist_entry *he)
1233 {
1234 	struct hist_entry_ops *ops = he->ops;
1235 
1236 	thread__zput(he->thread);
1237 	map__zput(he->ms.map);
1238 
1239 	if (he->branch_info) {
1240 		map__zput(he->branch_info->from.map);
1241 		map__zput(he->branch_info->to.map);
1242 		free_srcline(he->branch_info->srcline_from);
1243 		free_srcline(he->branch_info->srcline_to);
1244 		zfree(&he->branch_info);
1245 	}
1246 
1247 	if (he->mem_info) {
1248 		map__zput(he->mem_info->iaddr.map);
1249 		map__zput(he->mem_info->daddr.map);
1250 		mem_info__zput(he->mem_info);
1251 	}
1252 
1253 	if (he->block_info)
1254 		block_info__zput(he->block_info);
1255 
1256 	zfree(&he->res_samples);
1257 	zfree(&he->stat_acc);
1258 	free_srcline(he->srcline);
1259 	if (he->srcfile && he->srcfile[0])
1260 		zfree(&he->srcfile);
1261 	free_callchain(he->callchain);
1262 	zfree(&he->trace_output);
1263 	zfree(&he->raw_data);
1264 	ops->free(he);
1265 }
1266 
1267 /*
1268  * If this is not the last column, then we need to pad it according to the
1269  * pre-calculated max length for this column, otherwise don't bother adding
1270  * spaces because that would break viewing this with, for instance, 'less',
1271  * which would show tons of trailing spaces when a long C++ demangled method
1272  * name is sampled.
1273  */
1274 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1275 				   struct perf_hpp_fmt *fmt, int printed)
1276 {
1277 	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1278 		const int width = fmt->width(fmt, hpp, he->hists);
1279 		if (printed < width) {
1280 			advance_hpp(hpp, printed);
1281 			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1282 		}
1283 	}
1284 
1285 	return printed;
1286 }
1287 
1288 /*
1289  * collapse the histogram
1290  */
1291 
1292 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1293 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1294 				       enum hist_filter type);
1295 
1296 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1297 
1298 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1299 {
1300 	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1301 }
1302 
1303 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1304 						enum hist_filter type,
1305 						fmt_chk_fn check)
1306 {
1307 	struct perf_hpp_fmt *fmt;
1308 	bool type_match = false;
1309 	struct hist_entry *parent = he->parent_he;
1310 
1311 	switch (type) {
1312 	case HIST_FILTER__THREAD:
1313 		if (symbol_conf.comm_list == NULL &&
1314 		    symbol_conf.pid_list == NULL &&
1315 		    symbol_conf.tid_list == NULL)
1316 			return;
1317 		break;
1318 	case HIST_FILTER__DSO:
1319 		if (symbol_conf.dso_list == NULL)
1320 			return;
1321 		break;
1322 	case HIST_FILTER__SYMBOL:
1323 		if (symbol_conf.sym_list == NULL)
1324 			return;
1325 		break;
1326 	case HIST_FILTER__PARENT:
1327 	case HIST_FILTER__GUEST:
1328 	case HIST_FILTER__HOST:
1329 	case HIST_FILTER__SOCKET:
1330 	case HIST_FILTER__C2C:
1331 	default:
1332 		return;
1333 	}
1334 
1335 	/* if it's filtered by its own fmt, it has to have filter bits */
1336 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1337 		if (check(fmt)) {
1338 			type_match = true;
1339 			break;
1340 		}
1341 	}
1342 
1343 	if (type_match) {
1344 		/*
1345 		 * If the filter is for the current-level entry, propagate
1346 		 * the filter marker to its parents.  The marker bit was
1347 		 * already set by default, so it only needs to be cleared
1348 		 * for non-filtered entries.
1349 		 */
1350 		if (!(he->filtered & (1 << type))) {
1351 			while (parent) {
1352 				parent->filtered &= ~(1 << type);
1353 				parent = parent->parent_he;
1354 			}
1355 		}
1356 	} else {
1357 		/*
1358 		 * If the current entry doesn't have matching formats, set
1359 		 * the filter marker for upper-level entries.  It will be
1360 		 * cleared if one of its lower-level entries is not filtered.
1361 		 *
1362 		 * A lower-level entry inherits its parent's filter bit so
1363 		 * that lower-level entries of a non-filtered entry won't
1364 		 * set the filter marker.
1365 		 */
1366 		if (parent == NULL)
1367 			he->filtered |= (1 << type);
1368 		else
1369 			he->filtered |= (parent->filtered & (1 << type));
1370 	}
1371 }
1372 
1373 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1374 {
1375 	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1376 					    check_thread_entry);
1377 
1378 	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1379 					    perf_hpp__is_dso_entry);
1380 
1381 	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1382 					    perf_hpp__is_sym_entry);
1383 
1384 	hists__apply_filters(he->hists, he);
1385 }
1386 
1387 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1388 						 struct rb_root_cached *root,
1389 						 struct hist_entry *he,
1390 						 struct hist_entry *parent_he,
1391 						 struct perf_hpp_list *hpp_list)
1392 {
1393 	struct rb_node **p = &root->rb_root.rb_node;
1394 	struct rb_node *parent = NULL;
1395 	struct hist_entry *iter, *new;
1396 	struct perf_hpp_fmt *fmt;
1397 	int64_t cmp;
1398 	bool leftmost = true;
1399 
1400 	while (*p != NULL) {
1401 		parent = *p;
1402 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1403 
1404 		cmp = 0;
1405 		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1406 			cmp = fmt->collapse(fmt, iter, he);
1407 			if (cmp)
1408 				break;
1409 		}
1410 
1411 		if (!cmp) {
1412 			he_stat__add_stat(&iter->stat, &he->stat);
1413 			return iter;
1414 		}
1415 
1416 		if (cmp < 0)
1417 			p = &parent->rb_left;
1418 		else {
1419 			p = &parent->rb_right;
1420 			leftmost = false;
1421 		}
1422 	}
1423 
1424 	new = hist_entry__new(he, true);
1425 	if (new == NULL)
1426 		return NULL;
1427 
1428 	hists->nr_entries++;
1429 
1430 	/* save related format list for output */
1431 	new->hpp_list = hpp_list;
1432 	new->parent_he = parent_he;
1433 
1434 	hist_entry__apply_hierarchy_filters(new);
1435 
1436 	/* some fields are now passed to 'new' */
1437 	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1438 		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1439 			he->trace_output = NULL;
1440 		else
1441 			new->trace_output = NULL;
1442 
1443 		if (perf_hpp__is_srcline_entry(fmt))
1444 			he->srcline = NULL;
1445 		else
1446 			new->srcline = NULL;
1447 
1448 		if (perf_hpp__is_srcfile_entry(fmt))
1449 			he->srcfile = NULL;
1450 		else
1451 			new->srcfile = NULL;
1452 	}
1453 
1454 	rb_link_node(&new->rb_node_in, parent, p);
1455 	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1456 	return new;
1457 }
1458 
1459 static int hists__hierarchy_insert_entry(struct hists *hists,
1460 					 struct rb_root_cached *root,
1461 					 struct hist_entry *he)
1462 {
1463 	struct perf_hpp_list_node *node;
1464 	struct hist_entry *new_he = NULL;
1465 	struct hist_entry *parent = NULL;
1466 	int depth = 0;
1467 	int ret = 0;
1468 
1469 	list_for_each_entry(node, &hists->hpp_formats, list) {
1470 		/* skip period (overhead) and elided columns */
1471 		if (node->level == 0 || node->skip)
1472 			continue;
1473 
1474 		/* insert copy of 'he' for each fmt into the hierarchy */
1475 		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1476 		if (new_he == NULL) {
1477 			ret = -1;
1478 			break;
1479 		}
1480 
1481 		root = &new_he->hroot_in;
1482 		new_he->depth = depth++;
1483 		parent = new_he;
1484 	}
1485 
1486 	if (new_he) {
1487 		new_he->leaf = true;
1488 
1489 		if (hist_entry__has_callchains(new_he) &&
1490 		    symbol_conf.use_callchain) {
1491 			callchain_cursor_reset(&callchain_cursor);
1492 			if (callchain_merge(&callchain_cursor,
1493 					    new_he->callchain,
1494 					    he->callchain) < 0)
1495 				ret = -1;
1496 		}
1497 	}
1498 
1499 	/* 'he' is no longer used */
1500 	hist_entry__delete(he);
1501 
1502 	/* return 0 (or -1 on error) since it already applied filters */
1503 	return ret;
1504 }
1505 
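/*
 * Returns 1 if 'he' was linked into the collapsed tree, 0 if it was
 * merged into an existing entry (and freed), or -1 if merging the
 * callchains failed.  hists__collapse_resort() uses the result to decide
 * whether filters still need to be applied.
 */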
1506 static int hists__collapse_insert_entry(struct hists *hists,
1507 					struct rb_root_cached *root,
1508 					struct hist_entry *he)
1509 {
1510 	struct rb_node **p = &root->rb_root.rb_node;
1511 	struct rb_node *parent = NULL;
1512 	struct hist_entry *iter;
1513 	int64_t cmp;
1514 	bool leftmost = true;
1515 
1516 	if (symbol_conf.report_hierarchy)
1517 		return hists__hierarchy_insert_entry(hists, root, he);
1518 
1519 	while (*p != NULL) {
1520 		parent = *p;
1521 		iter = rb_entry(parent, struct hist_entry, rb_node_in);
1522 
1523 		cmp = hist_entry__collapse(iter, he);
1524 
1525 		if (!cmp) {
1526 			int ret = 0;
1527 
1528 			he_stat__add_stat(&iter->stat, &he->stat);
1529 			if (symbol_conf.cumulate_callchain)
1530 				he_stat__add_stat(iter->stat_acc, he->stat_acc);
1531 
1532 			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1533 				callchain_cursor_reset(&callchain_cursor);
1534 				if (callchain_merge(&callchain_cursor,
1535 						    iter->callchain,
1536 						    he->callchain) < 0)
1537 					ret = -1;
1538 			}
1539 			hist_entry__delete(he);
1540 			return ret;
1541 		}
1542 
1543 		if (cmp < 0)
1544 			p = &(*p)->rb_left;
1545 		else {
1546 			p = &(*p)->rb_right;
1547 			leftmost = false;
1548 		}
1549 	}
1550 	hists->nr_entries++;
1551 
1552 	rb_link_node(&he->rb_node_in, parent, p);
1553 	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1554 	return 1;
1555 }
1556 
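/*
 * hists->entries_in flips between the two trees in entries_in_array, so
 * new samples can keep flowing into one tree while the collapse code
 * drains the other; the swap itself is protected by hists->lock.
 */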
1557 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1558 {
1559 	struct rb_root_cached *root;
1560 
1561 	pthread_mutex_lock(&hists->lock);
1562 
1563 	root = hists->entries_in;
1564 	if (++hists->entries_in > &hists->entries_in_array[1])
1565 		hists->entries_in = &hists->entries_in_array[0];
1566 
1567 	pthread_mutex_unlock(&hists->lock);
1568 
1569 	return root;
1570 }
1571 
1572 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1573 {
1574 	hists__filter_entry_by_dso(hists, he);
1575 	hists__filter_entry_by_thread(hists, he);
1576 	hists__filter_entry_by_symbol(hists, he);
1577 	hists__filter_entry_by_socket(hists, he);
1578 }
1579 
1580 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1581 {
1582 	struct rb_root_cached *root;
1583 	struct rb_node *next;
1584 	struct hist_entry *n;
1585 	int ret;
1586 
1587 	if (!hists__has(hists, need_collapse))
1588 		return 0;
1589 
1590 	hists->nr_entries = 0;
1591 
1592 	root = hists__get_rotate_entries_in(hists);
1593 
1594 	next = rb_first_cached(root);
1595 
1596 	while (next) {
1597 		if (session_done())
1598 			break;
1599 		n = rb_entry(next, struct hist_entry, rb_node_in);
1600 		next = rb_next(&n->rb_node_in);
1601 
1602 		rb_erase_cached(&n->rb_node_in, root);
1603 		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1604 		if (ret < 0)
1605 			return -1;
1606 
1607 		if (ret) {
1608 			/*
1609 			 * If it wasn't combined with one of the entries already
1610 			 * collapsed, we need to apply the filters that may have
1611 			 * been set by, say, the hist_browser.
1612 			 */
1613 			hists__apply_filters(hists, n);
1614 		}
1615 		if (prog)
1616 			ui_progress__update(prog, 1);
1617 	}
1618 	return 0;
1619 }
1620 
1621 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1622 {
1623 	struct hists *hists = a->hists;
1624 	struct perf_hpp_fmt *fmt;
1625 	int64_t cmp = 0;
1626 
1627 	hists__for_each_sort_list(hists, fmt) {
1628 		if (perf_hpp__should_skip(fmt, a->hists))
1629 			continue;
1630 
1631 		cmp = fmt->sort(fmt, a, b);
1632 		if (cmp)
1633 			break;
1634 	}
1635 
1636 	return cmp;
1637 }
1638 
1639 static void hists__reset_filter_stats(struct hists *hists)
1640 {
1641 	hists->nr_non_filtered_entries = 0;
1642 	hists->stats.total_non_filtered_period = 0;
1643 }
1644 
1645 void hists__reset_stats(struct hists *hists)
1646 {
1647 	hists->nr_entries = 0;
1648 	hists->stats.total_period = 0;
1649 
1650 	hists__reset_filter_stats(hists);
1651 }
1652 
1653 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1654 {
1655 	hists->nr_non_filtered_entries++;
1656 	hists->stats.total_non_filtered_period += h->stat.period;
1657 }
1658 
1659 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1660 {
1661 	if (!h->filtered)
1662 		hists__inc_filter_stats(hists, h);
1663 
1664 	hists->nr_entries++;
1665 	hists->stats.total_period += h->stat.period;
1666 }
1667 
1668 static void hierarchy_recalc_total_periods(struct hists *hists)
1669 {
1670 	struct rb_node *node;
1671 	struct hist_entry *he;
1672 
1673 	node = rb_first_cached(&hists->entries);
1674 
1675 	hists->stats.total_period = 0;
1676 	hists->stats.total_non_filtered_period = 0;
1677 
1678 	/*
1679 	 * Recalculate the total period using top-level entries only,
1680 	 * since lower-level entries only see non-filtered entries,
1681 	 * while upper-level entries hold the sum of both.
1682 	 */
1683 	while (node) {
1684 		he = rb_entry(node, struct hist_entry, rb_node);
1685 		node = rb_next(node);
1686 
1687 		hists->stats.total_period += he->stat.period;
1688 		if (!he->filtered)
1689 			hists->stats.total_non_filtered_period += he->stat.period;
1690 	}
1691 }
1692 
1693 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1694 					  struct hist_entry *he)
1695 {
1696 	struct rb_node **p = &root->rb_root.rb_node;
1697 	struct rb_node *parent = NULL;
1698 	struct hist_entry *iter;
1699 	struct perf_hpp_fmt *fmt;
1700 	bool leftmost = true;
1701 
1702 	while (*p != NULL) {
1703 		parent = *p;
1704 		iter = rb_entry(parent, struct hist_entry, rb_node);
1705 
1706 		if (hist_entry__sort(he, iter) > 0)
1707 			p = &parent->rb_left;
1708 		else {
1709 			p = &parent->rb_right;
1710 			leftmost = false;
1711 		}
1712 	}
1713 
1714 	rb_link_node(&he->rb_node, parent, p);
1715 	rb_insert_color_cached(&he->rb_node, root, leftmost);
1716 
1717 	/* update column width of dynamic entry */
1718 	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1719 		if (perf_hpp__is_dynamic_entry(fmt))
1720 			fmt->sort(fmt, he, NULL);
1721 	}
1722 }
1723 
1724 static void hists__hierarchy_output_resort(struct hists *hists,
1725 					   struct ui_progress *prog,
1726 					   struct rb_root_cached *root_in,
1727 					   struct rb_root_cached *root_out,
1728 					   u64 min_callchain_hits,
1729 					   bool use_callchain)
1730 {
1731 	struct rb_node *node;
1732 	struct hist_entry *he;
1733 
1734 	*root_out = RB_ROOT_CACHED;
1735 	node = rb_first_cached(root_in);
1736 
1737 	while (node) {
1738 		he = rb_entry(node, struct hist_entry, rb_node_in);
1739 		node = rb_next(node);
1740 
1741 		hierarchy_insert_output_entry(root_out, he);
1742 
1743 		if (prog)
1744 			ui_progress__update(prog, 1);
1745 
1746 		hists->nr_entries++;
1747 		if (!he->filtered) {
1748 			hists->nr_non_filtered_entries++;
1749 			hists__calc_col_len(hists, he);
1750 		}
1751 
1752 		if (!he->leaf) {
1753 			hists__hierarchy_output_resort(hists, prog,
1754 						       &he->hroot_in,
1755 						       &he->hroot_out,
1756 						       min_callchain_hits,
1757 						       use_callchain);
1758 			continue;
1759 		}
1760 
1761 		if (!use_callchain)
1762 			continue;
1763 
1764 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1765 			u64 total = he->stat.period;
1766 
1767 			if (symbol_conf.cumulate_callchain)
1768 				total = he->stat_acc->period;
1769 
1770 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1771 		}
1772 
1773 		callchain_param.sort(&he->sorted_chain, he->callchain,
1774 				     min_callchain_hits, &callchain_param);
1775 	}
1776 }
1777 
1778 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1779 					 struct hist_entry *he,
1780 					 u64 min_callchain_hits,
1781 					 bool use_callchain)
1782 {
1783 	struct rb_node **p = &entries->rb_root.rb_node;
1784 	struct rb_node *parent = NULL;
1785 	struct hist_entry *iter;
1786 	struct perf_hpp_fmt *fmt;
1787 	bool leftmost = true;
1788 
1789 	if (use_callchain) {
1790 		if (callchain_param.mode == CHAIN_GRAPH_REL) {
1791 			u64 total = he->stat.period;
1792 
1793 			if (symbol_conf.cumulate_callchain)
1794 				total = he->stat_acc->period;
1795 
1796 			min_callchain_hits = total * (callchain_param.min_percent / 100);
1797 		}
1798 		callchain_param.sort(&he->sorted_chain, he->callchain,
1799 				      min_callchain_hits, &callchain_param);
1800 	}
1801 
1802 	while (*p != NULL) {
1803 		parent = *p;
1804 		iter = rb_entry(parent, struct hist_entry, rb_node);
1805 
1806 		if (hist_entry__sort(he, iter) > 0)
1807 			p = &(*p)->rb_left;
1808 		else {
1809 			p = &(*p)->rb_right;
1810 			leftmost = false;
1811 		}
1812 	}
1813 
1814 	rb_link_node(&he->rb_node, parent, p);
1815 	rb_insert_color_cached(&he->rb_node, entries, leftmost);
1816 
1817 	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1818 		if (perf_hpp__is_dynamic_entry(fmt) &&
1819 		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
1820 			fmt->sort(fmt, he, NULL);  /* update column width */
1821 	}
1822 }
1823 
1824 static void output_resort(struct hists *hists, struct ui_progress *prog,
1825 			  bool use_callchain, hists__resort_cb_t cb,
1826 			  void *cb_arg)
1827 {
1828 	struct rb_root_cached *root;
1829 	struct rb_node *next;
1830 	struct hist_entry *n;
1831 	u64 callchain_total;
1832 	u64 min_callchain_hits;
1833 
1834 	callchain_total = hists->callchain_period;
1835 	if (symbol_conf.filter_relative)
1836 		callchain_total = hists->callchain_non_filtered_period;
1837 
1838 	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1839 
1840 	hists__reset_stats(hists);
1841 	hists__reset_col_len(hists);
1842 
1843 	if (symbol_conf.report_hierarchy) {
1844 		hists__hierarchy_output_resort(hists, prog,
1845 					       &hists->entries_collapsed,
1846 					       &hists->entries,
1847 					       min_callchain_hits,
1848 					       use_callchain);
1849 		hierarchy_recalc_total_periods(hists);
1850 		return;
1851 	}
1852 
1853 	if (hists__has(hists, need_collapse))
1854 		root = &hists->entries_collapsed;
1855 	else
1856 		root = hists->entries_in;
1857 
1858 	next = rb_first_cached(root);
1859 	hists->entries = RB_ROOT_CACHED;
1860 
1861 	while (next) {
1862 		n = rb_entry(next, struct hist_entry, rb_node_in);
1863 		next = rb_next(&n->rb_node_in);
1864 
1865 		if (cb && cb(n, cb_arg))
1866 			continue;
1867 
1868 		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1869 		hists__inc_stats(hists, n);
1870 
1871 		if (!n->filtered)
1872 			hists__calc_col_len(hists, n);
1873 
1874 		if (prog)
1875 			ui_progress__update(prog, 1);
1876 	}
1877 }
1878 
1879 void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1880 				  hists__resort_cb_t cb, void *cb_arg)
1881 {
1882 	bool use_callchain;
1883 
1884 	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1885 		use_callchain = evsel__has_callchain(evsel);
1886 	else
1887 		use_callchain = symbol_conf.use_callchain;
1888 
1889 	use_callchain |= symbol_conf.show_branchflag_count;
1890 
1891 	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1892 }
1893 
1894 void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1895 {
1896 	return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
1897 }
1898 
1899 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1900 {
1901 	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1902 }
1903 
1904 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1905 			     hists__resort_cb_t cb)
1906 {
1907 	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1908 }
1909 
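/*
 * Hierarchy traversal helpers: in hierarchy mode each entry owns a nested
 * hroot_out tree, and the rb_hierarchy_*() functions below walk those
 * nested trees depth-first.  can_goto_child() decides whether to descend,
 * honouring folding unless overridden by HMD_FORCE_CHILD or
 * HMD_FORCE_SIBLING.
 */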
1910 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1911 {
1912 	if (he->leaf || hmd == HMD_FORCE_SIBLING)
1913 		return false;
1914 
1915 	if (he->unfolded || hmd == HMD_FORCE_CHILD)
1916 		return true;
1917 
1918 	return false;
1919 }
1920 
1921 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1922 {
1923 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1924 
1925 	while (can_goto_child(he, HMD_NORMAL)) {
1926 		node = rb_last(&he->hroot_out.rb_root);
1927 		he = rb_entry(node, struct hist_entry, rb_node);
1928 	}
1929 	return node;
1930 }
1931 
1932 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1933 {
1934 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1935 
1936 	if (can_goto_child(he, hmd))
1937 		node = rb_first_cached(&he->hroot_out);
1938 	else
1939 		node = rb_next(node);
1940 
1941 	while (node == NULL) {
1942 		he = he->parent_he;
1943 		if (he == NULL)
1944 			break;
1945 
1946 		node = rb_next(&he->rb_node);
1947 	}
1948 	return node;
1949 }
1950 
1951 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1952 {
1953 	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1954 
1955 	node = rb_prev(node);
1956 	if (node)
1957 		return rb_hierarchy_last(node);
1958 
1959 	he = he->parent_he;
1960 	if (he == NULL)
1961 		return NULL;
1962 
1963 	return &he->rb_node;
1964 }
1965 
1966 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1967 {
1968 	struct rb_node *node;
1969 	struct hist_entry *child;
1970 	float percent;
1971 
1972 	if (he->leaf)
1973 		return false;
1974 
1975 	node = rb_first_cached(&he->hroot_out);
1976 	child = rb_entry(node, struct hist_entry, rb_node);
1977 
1978 	while (node && child->filtered) {
1979 		node = rb_next(node);
1980 		child = rb_entry(node, struct hist_entry, rb_node);
1981 	}
1982 
1983 	if (node)
1984 		percent = hist_entry__get_percent_limit(child);
1985 	else
1986 		percent = 0;
1987 
1988 	return node && percent >= limit;
1989 }
1990 
1991 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1992 				       enum hist_filter filter)
1993 {
1994 	h->filtered &= ~(1 << filter);
1995 
1996 	if (symbol_conf.report_hierarchy) {
1997 		struct hist_entry *parent = h->parent_he;
1998 
1999 		while (parent) {
2000 			he_stat__add_stat(&parent->stat, &h->stat);
2001 
2002 			parent->filtered &= ~(1 << filter);
2003 
2004 			if (parent->filtered)
2005 				goto next;
2006 
2007 			/* force fold unfiltered entry for simplicity */
2008 			parent->unfolded = false;
2009 			parent->has_no_entry = false;
2010 			parent->row_offset = 0;
2011 			parent->nr_rows = 0;
2012 next:
2013 			parent = parent->parent_he;
2014 		}
2015 	}
2016 
2017 	if (h->filtered)
2018 		return;
2019 
2020 	/* force fold unfiltered entry for simplicity */
2021 	h->unfolded = false;
2022 	h->has_no_entry = false;
2023 	h->row_offset = 0;
2024 	h->nr_rows = 0;
2025 
2026 	hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2027 
2028 	hists__inc_filter_stats(hists, h);
2029 	hists__calc_col_len(hists, h);
2030 }
2031 
2033 static bool hists__filter_entry_by_dso(struct hists *hists,
2034 				       struct hist_entry *he)
2035 {
2036 	if (hists->dso_filter != NULL &&
2037 	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
2038 		he->filtered |= (1 << HIST_FILTER__DSO);
2039 		return true;
2040 	}
2041 
2042 	return false;
2043 }
2044 
2045 static bool hists__filter_entry_by_thread(struct hists *hists,
2046 					  struct hist_entry *he)
2047 {
2048 	if (hists->thread_filter != NULL &&
2049 	    he->thread != hists->thread_filter) {
2050 		he->filtered |= (1 << HIST_FILTER__THREAD);
2051 		return true;
2052 	}
2053 
2054 	return false;
2055 }
2056 
2057 static bool hists__filter_entry_by_symbol(struct hists *hists,
2058 					  struct hist_entry *he)
2059 {
2060 	if (hists->symbol_filter_str != NULL &&
2061 	    (!he->ms.sym || strstr(he->ms.sym->name,
2062 				   hists->symbol_filter_str) == NULL)) {
2063 		he->filtered |= (1 << HIST_FILTER__SYMBOL);
2064 		return true;
2065 	}
2066 
2067 	return false;
2068 }
2069 
2070 static bool hists__filter_entry_by_socket(struct hists *hists,
2071 					  struct hist_entry *he)
2072 {
2073 	if ((hists->socket_filter > -1) &&
2074 	    (he->socket != hists->socket_filter)) {
2075 		he->filtered |= (1 << HIST_FILTER__SOCKET);
2076 		return true;
2077 	}
2078 
2079 	return false;
2080 }
2081 
2082 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2083 
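/*
 * Apply @filter to every entry of the flat output tree: reset the
 * non-filtered stats and column widths, then re-account each entry the
 * filter does not reject.
 */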
2084 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2085 {
2086 	struct rb_node *nd;
2087 
2088 	hists->stats.nr_non_filtered_samples = 0;
2089 
2090 	hists__reset_filter_stats(hists);
2091 	hists__reset_col_len(hists);
2092 
2093 	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2094 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2095 
2096 		if (filter(hists, h))
2097 			continue;
2098 
2099 		hists__remove_entry_filter(hists, h, type);
2100 	}
2101 }
2102 
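/*
 * Re-insert @he into @root in output sort order and, unless it is a leaf
 * or still filtered, rebuild its children tree the same way.
 */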
2103 static void resort_filtered_entry(struct rb_root_cached *root,
2104 				  struct hist_entry *he)
2105 {
2106 	struct rb_node **p = &root->rb_root.rb_node;
2107 	struct rb_node *parent = NULL;
2108 	struct hist_entry *iter;
2109 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2110 	struct rb_node *nd;
2111 	bool leftmost = true;
2112 
2113 	while (*p != NULL) {
2114 		parent = *p;
2115 		iter = rb_entry(parent, struct hist_entry, rb_node);
2116 
2117 		if (hist_entry__sort(he, iter) > 0)
2118 			p = &(*p)->rb_left;
2119 		else {
2120 			p = &(*p)->rb_right;
2121 			leftmost = false;
2122 		}
2123 	}
2124 
2125 	rb_link_node(&he->rb_node, parent, p);
2126 	rb_insert_color_cached(&he->rb_node, root, leftmost);
2127 
2128 	if (he->leaf || he->filtered)
2129 		return;
2130 
2131 	nd = rb_first_cached(&he->hroot_out);
2132 	while (nd) {
2133 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2134 
2135 		nd = rb_next(nd);
2136 		rb_erase_cached(&h->rb_node, &he->hroot_out);
2137 
2138 		resort_filtered_entry(&new_root, h);
2139 	}
2140 
2141 	he->hroot_out = new_root;
2142 }
2143 
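/*
 * Hierarchy filtering has to walk the whole tree, as a filter may match
 * at any level.  hist_entry__filter() returns a negative value for a
 * non-matching sort type, 1 when the entry is filtered out and 0 when it
 * survives.
 */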
2144 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2145 {
2146 	struct rb_node *nd;
2147 	struct rb_root_cached new_root = RB_ROOT_CACHED;
2148 
2149 	hists->stats.nr_non_filtered_samples = 0;
2150 
2151 	hists__reset_filter_stats(hists);
2152 	hists__reset_col_len(hists);
2153 
2154 	nd = rb_first_cached(&hists->entries);
2155 	while (nd) {
2156 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2157 		int ret;
2158 
2159 		ret = hist_entry__filter(h, type, arg);
2160 
2161 		/*
2162 		 * case 1. non-matching type
2163 		 * zero out the period, set filter marker and move to child
2164 		 */
2165 		if (ret < 0) {
2166 			memset(&h->stat, 0, sizeof(h->stat));
2167 			h->filtered |= (1 << type);
2168 
2169 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2170 		}
2171 		/*
2172 		 * case 2. matched type (filter out)
2173 		 * set filter marker and move to next
2174 		 */
2175 		else if (ret == 1) {
2176 			h->filtered |= (1 << type);
2177 
2178 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2179 		}
2180 		/*
2181 		 * case 3. ok (not filtered)
2182 		 * add period to hists and parents, erase the filter marker
2183 		 * and move to next sibling
2184 		 */
2185 		else {
2186 			hists__remove_entry_filter(hists, h, type);
2187 
2188 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2189 		}
2190 	}
2191 
2192 	hierarchy_recalc_total_periods(hists);
2193 
2194 	/*
	 * Resort the output after applying a new filter, since a filter in a
	 * lower hierarchy level can change periods in an upper level.
2197 	 */
2198 	nd = rb_first_cached(&hists->entries);
2199 	while (nd) {
2200 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2201 
2202 		nd = rb_next(nd);
2203 		rb_erase_cached(&h->rb_node, &hists->entries);
2204 
2205 		resort_filtered_entry(&new_root, h);
2206 	}
2207 
2208 	hists->entries = new_root;
2209 }
2210 
2211 void hists__filter_by_thread(struct hists *hists)
2212 {
2213 	if (symbol_conf.report_hierarchy)
2214 		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2215 					hists->thread_filter);
2216 	else
2217 		hists__filter_by_type(hists, HIST_FILTER__THREAD,
2218 				      hists__filter_entry_by_thread);
2219 }
2220 
2221 void hists__filter_by_dso(struct hists *hists)
2222 {
2223 	if (symbol_conf.report_hierarchy)
2224 		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2225 					hists->dso_filter);
2226 	else
2227 		hists__filter_by_type(hists, HIST_FILTER__DSO,
2228 				      hists__filter_entry_by_dso);
2229 }
2230 
2231 void hists__filter_by_symbol(struct hists *hists)
2232 {
2233 	if (symbol_conf.report_hierarchy)
2234 		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2235 					hists->symbol_filter_str);
2236 	else
2237 		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2238 				      hists__filter_entry_by_symbol);
2239 }
2240 
2241 void hists__filter_by_socket(struct hists *hists)
2242 {
2243 	if (symbol_conf.report_hierarchy)
2244 		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2245 					&hists->socket_filter);
2246 	else
2247 		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2248 				      hists__filter_entry_by_socket);
2249 }
2250 
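/* nr_events[0] accumulates the total count across all event types. */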
2251 void events_stats__inc(struct events_stats *stats, u32 type)
2252 {
2253 	++stats->nr_events[0];
2254 	++stats->nr_events[type];
2255 }
2256 
2257 void hists__inc_nr_events(struct hists *hists, u32 type)
2258 {
2259 	events_stats__inc(&hists->stats, type);
2260 }
2261 
2262 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2263 {
2264 	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2265 	if (!filtered)
2266 		hists->stats.nr_non_filtered_samples++;
2267 }
2268 
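/*
 * Find the entry collapsing equal to @pair in @hists, or insert a
 * zero-stat "dummy" copy of it to serve as the pairing anchor.
 */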
2269 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2270 						 struct hist_entry *pair)
2271 {
2272 	struct rb_root_cached *root;
2273 	struct rb_node **p;
2274 	struct rb_node *parent = NULL;
2275 	struct hist_entry *he;
2276 	int64_t cmp;
2277 	bool leftmost = true;
2278 
2279 	if (hists__has(hists, need_collapse))
2280 		root = &hists->entries_collapsed;
2281 	else
2282 		root = hists->entries_in;
2283 
2284 	p = &root->rb_root.rb_node;
2285 
2286 	while (*p != NULL) {
2287 		parent = *p;
2288 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2289 
2290 		cmp = hist_entry__collapse(he, pair);
2291 
2292 		if (!cmp)
2293 			goto out;
2294 
2295 		if (cmp < 0)
2296 			p = &(*p)->rb_left;
2297 		else {
2298 			p = &(*p)->rb_right;
2299 			leftmost = false;
2300 		}
2301 	}
2302 
2303 	he = hist_entry__new(pair, true);
2304 	if (he) {
2305 		memset(&he->stat, 0, sizeof(he->stat));
2306 		he->hists = hists;
2307 		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(*he->stat_acc));
2309 		rb_link_node(&he->rb_node_in, parent, p);
2310 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2311 		hists__inc_stats(hists, he);
2312 		he->dummy = true;
2313 	}
2314 out:
2315 	return he;
2316 }
2317 
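/*
 * Hierarchy counterpart of hists__add_dummy_entry(): find or insert a
 * zero-stat copy of @pair at this level of @root, comparing only the
 * level's own sort keys.
 */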
2318 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2319 						    struct rb_root_cached *root,
2320 						    struct hist_entry *pair)
2321 {
2322 	struct rb_node **p;
2323 	struct rb_node *parent = NULL;
2324 	struct hist_entry *he;
2325 	struct perf_hpp_fmt *fmt;
2326 	bool leftmost = true;
2327 
2328 	p = &root->rb_root.rb_node;
2329 	while (*p != NULL) {
2330 		int64_t cmp = 0;
2331 
2332 		parent = *p;
2333 		he = rb_entry(parent, struct hist_entry, rb_node_in);
2334 
2335 		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2336 			cmp = fmt->collapse(fmt, he, pair);
2337 			if (cmp)
2338 				break;
2339 		}
2340 		if (!cmp)
2341 			goto out;
2342 
2343 		if (cmp < 0)
2344 			p = &parent->rb_left;
2345 		else {
2346 			p = &parent->rb_right;
2347 			leftmost = false;
2348 		}
2349 	}
2350 
2351 	he = hist_entry__new(pair, true);
2352 	if (he) {
2353 		rb_link_node(&he->rb_node_in, parent, p);
2354 		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2355 
2356 		he->dummy = true;
2357 		he->hists = hists;
2358 		memset(&he->stat, 0, sizeof(he->stat));
2359 		hists__inc_stats(hists, he);
2360 	}
2361 out:
2362 	return he;
2363 }
2364 
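/* Look up an entry collapsing equal to @he in the hists' input tree. */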
2365 static struct hist_entry *hists__find_entry(struct hists *hists,
2366 					    struct hist_entry *he)
2367 {
2368 	struct rb_node *n;
2369 
2370 	if (hists__has(hists, need_collapse))
2371 		n = hists->entries_collapsed.rb_root.rb_node;
2372 	else
2373 		n = hists->entries_in->rb_root.rb_node;
2374 
2375 	while (n) {
2376 		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2377 		int64_t cmp = hist_entry__collapse(iter, he);
2378 
2379 		if (cmp < 0)
2380 			n = n->rb_left;
2381 		else if (cmp > 0)
2382 			n = n->rb_right;
2383 		else
2384 			return iter;
2385 	}
2386 
2387 	return NULL;
2388 }
2389 
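/* As hists__find_entry(), but for a single level of a hierarchy tree. */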
2390 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2391 						      struct hist_entry *he)
2392 {
2393 	struct rb_node *n = root->rb_root.rb_node;
2394 
2395 	while (n) {
2396 		struct hist_entry *iter;
2397 		struct perf_hpp_fmt *fmt;
2398 		int64_t cmp = 0;
2399 
2400 		iter = rb_entry(n, struct hist_entry, rb_node_in);
2401 		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2402 			cmp = fmt->collapse(fmt, iter, he);
2403 			if (cmp)
2404 				break;
2405 		}
2406 
2407 		if (cmp < 0)
2408 			n = n->rb_left;
2409 		else if (cmp > 0)
2410 			n = n->rb_right;
2411 		else
2412 			return iter;
2413 	}
2414 
2415 	return NULL;
2416 }
2417 
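/* Recursively pair up entries of two hierarchies matching at each level. */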
2418 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2419 				   struct rb_root_cached *other_root)
2420 {
2421 	struct rb_node *nd;
2422 	struct hist_entry *pos, *pair;
2423 
2424 	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2425 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2426 		pair = hists__find_hierarchy_entry(other_root, pos);
2427 
2428 		if (pair) {
2429 			hist_entry__add_pair(pair, pos);
2430 			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2431 		}
2432 	}
2433 }
2434 
2435 /*
2436  * Look for pairs to link to the leader buckets (hist_entries):
2437  */
2438 void hists__match(struct hists *leader, struct hists *other)
2439 {
2440 	struct rb_root_cached *root;
2441 	struct rb_node *nd;
2442 	struct hist_entry *pos, *pair;
2443 
2444 	if (symbol_conf.report_hierarchy) {
2445 		/* hierarchy report always collapses entries */
2446 		return hists__match_hierarchy(&leader->entries_collapsed,
2447 					      &other->entries_collapsed);
2448 	}
2449 
2450 	if (hists__has(leader, need_collapse))
2451 		root = &leader->entries_collapsed;
2452 	else
2453 		root = leader->entries_in;
2454 
2455 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2456 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2457 		pair = hists__find_entry(other, pos);
2458 
2459 		if (pair)
2460 			hist_entry__add_pair(pair, pos);
2461 	}
2462 }
2463 
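/*
 * Make sure every entry in the 'other' hierarchy has a counterpart in
 * the leader: reuse an existing pair when there is one, otherwise insert
 * a dummy entry at the same level and link the two.
 */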
2464 static int hists__link_hierarchy(struct hists *leader_hists,
2465 				 struct hist_entry *parent,
2466 				 struct rb_root_cached *leader_root,
2467 				 struct rb_root_cached *other_root)
2468 {
2469 	struct rb_node *nd;
2470 	struct hist_entry *pos, *leader;
2471 
2472 	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2473 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2474 
2475 		if (hist_entry__has_pairs(pos)) {
2476 			bool found = false;
2477 
2478 			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2479 				if (leader->hists == leader_hists) {
2480 					found = true;
2481 					break;
2482 				}
2483 			}
2484 			if (!found)
2485 				return -1;
2486 		} else {
2487 			leader = add_dummy_hierarchy_entry(leader_hists,
2488 							   leader_root, pos);
2489 			if (leader == NULL)
2490 				return -1;
2491 
			/* the parent must point into the leader hierarchy, not into pos */
2493 			leader->parent_he = parent;
2494 
2495 			hist_entry__add_pair(pos, leader);
2496 		}
2497 
2498 		if (!pos->leaf) {
2499 			if (hists__link_hierarchy(leader_hists, leader,
2500 						  &leader->hroot_in,
2501 						  &pos->hroot_in) < 0)
2502 				return -1;
2503 		}
2504 	}
2505 	return 0;
2506 }
2507 
/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0 and nr_events=0, to serve as the list header.
 */
2513 int hists__link(struct hists *leader, struct hists *other)
2514 {
2515 	struct rb_root_cached *root;
2516 	struct rb_node *nd;
2517 	struct hist_entry *pos, *pair;
2518 
2519 	if (symbol_conf.report_hierarchy) {
2520 		/* hierarchy report always collapses entries */
2521 		return hists__link_hierarchy(leader, NULL,
2522 					     &leader->entries_collapsed,
2523 					     &other->entries_collapsed);
2524 	}
2525 
2526 	if (hists__has(other, need_collapse))
2527 		root = &other->entries_collapsed;
2528 	else
2529 		root = other->entries_in;
2530 
2531 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2532 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2533 
2534 		if (!hist_entry__has_pairs(pos)) {
2535 			pair = hists__add_dummy_entry(leader, pos);
2536 			if (pair == NULL)
2537 				return -1;
2538 			hist_entry__add_pair(pos, pair);
2539 		}
2540 	}
2541 
2542 	return 0;
2543 }
2544 
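/*
 * Undo hists__match()/hists__link(): detach every entry from its pairs.
 *
 * A sketch of the typical pairing cycle, for illustration only:
 *
 *	hists__match(leader, other);		// pair entries present in both
 *	if (hists__link(leader, other) < 0)	// add dummies for the rest
 *		return -1;
 *	...					// compare the paired entries
 *	hists__unlink(leader);			// drop the links when done
 */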
2545 int hists__unlink(struct hists *hists)
2546 {
2547 	struct rb_root_cached *root;
2548 	struct rb_node *nd;
2549 	struct hist_entry *pos;
2550 
2551 	if (hists__has(hists, need_collapse))
2552 		root = &hists->entries_collapsed;
2553 	else
2554 		root = hists->entries_in;
2555 
2556 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2557 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
2558 		list_del_init(&pos->pairs.node);
2559 	}
2560 
2561 	return 0;
2562 }
2563 
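/*
 * Walk the sample's branch stack and feed the per-branch cycle counts
 * to the annotation code, so cycles/IPC can be shown per address.
 */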
2564 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2565 			  struct perf_sample *sample, bool nonany_branch_mode)
2566 {
2567 	struct branch_info *bi;
2568 
	/* If we have branch cycles, always annotate them. */
2570 	if (bs && bs->nr && bs->entries[0].flags.cycles) {
2571 		int i;
2572 
2573 		bi = sample__resolve_bstack(sample, al);
2574 		if (bi) {
2575 			struct addr_map_symbol *prev = NULL;
2576 
2577 			/*
2578 			 * Ignore errors, still want to process the
2579 			 * other entries.
2580 			 *
			 * For non-standard branch modes always
2582 			 * force no IPC (prev == NULL)
2583 			 *
2584 			 * Note that perf stores branches reversed from
2585 			 * program order!
2586 			 */
2587 			for (i = bs->nr - 1; i >= 0; i--) {
2588 				addr_map_symbol__account_cycles(&bi[i].from,
2589 					nonany_branch_mode ? NULL : prev,
2590 					bi[i].flags.cycles);
2591 				prev = &bi[i].to;
2592 			}
2593 			free(bi);
2594 		}
2595 	}
2596 }
2597 
2598 size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
2599 {
2600 	struct evsel *pos;
2601 	size_t ret = 0;
2602 
2603 	evlist__for_each_entry(evlist, pos) {
2604 		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2605 		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2606 	}
2607 
2608 	return ret;
2609 }
2610 
2612 u64 hists__total_period(struct hists *hists)
2613 {
2614 	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2615 		hists->stats.total_period;
2616 }
2617 
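/*
 * Format the hists title line ("Samples: ... Event count (approx.): ...")
 * into @bf, appending any active UID, thread, DSO and socket filters.
 */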
2618 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2619 {
2620 	char unit;
2621 	int printed;
2622 	const struct dso *dso = hists->dso_filter;
2623 	struct thread *thread = hists->thread_filter;
2624 	int socket_id = hists->socket_filter;
2625 	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
2626 	u64 nr_events = hists->stats.total_period;
2627 	struct evsel *evsel = hists_to_evsel(hists);
2628 	const char *ev_name = perf_evsel__name(evsel);
2629 	char buf[512], sample_freq_str[64] = "";
2630 	size_t buflen = sizeof(buf);
2631 	char ref[30] = " show reference callgraph, ";
2632 	bool enable_ref = false;
2633 
2634 	if (symbol_conf.filter_relative) {
2635 		nr_samples = hists->stats.nr_non_filtered_samples;
2636 		nr_events = hists->stats.total_non_filtered_period;
2637 	}
2638 
2639 	if (perf_evsel__is_group_event(evsel)) {
2640 		struct evsel *pos;
2641 
2642 		perf_evsel__group_desc(evsel, buf, buflen);
2643 		ev_name = buf;
2644 
2645 		for_each_group_member(pos, evsel) {
2646 			struct hists *pos_hists = evsel__hists(pos);
2647 
2648 			if (symbol_conf.filter_relative) {
2649 				nr_samples += pos_hists->stats.nr_non_filtered_samples;
2650 				nr_events += pos_hists->stats.total_non_filtered_period;
2651 			} else {
2652 				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
2653 				nr_events += pos_hists->stats.total_period;
2654 			}
2655 		}
2656 	}
2657 
2658 	if (symbol_conf.show_ref_callgraph &&
2659 	    strstr(ev_name, "call-graph=no"))
2660 		enable_ref = true;
2661 
2662 	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %" PRIu64 " Hz,", evsel->core.attr.sample_freq);
2664 
2665 	nr_samples = convert_unit(nr_samples, &unit);
2666 	printed = scnprintf(bf, size,
2667 			   "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2668 			   nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2669 			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2670 
2672 	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
2674 				    ", UID: %s", hists->uid_filter_str);
2675 	if (thread) {
2676 		if (hists__has(hists, thread)) {
2677 			printed += scnprintf(bf + printed, size - printed,
2678 				    ", Thread: %s(%d)",
2679 				     (thread->comm_set ? thread__comm_str(thread) : ""),
2680 				    thread->tid);
2681 		} else {
2682 			printed += scnprintf(bf + printed, size - printed,
2683 				    ", Thread: %s",
2684 				     (thread->comm_set ? thread__comm_str(thread) : ""));
2685 		}
2686 	}
2687 	if (dso)
2688 		printed += scnprintf(bf + printed, size - printed,
2689 				    ", DSO: %s", dso->short_name);
2690 	if (socket_id > -1)
2691 		printed += scnprintf(bf + printed, size - printed,
2692 				    ", Processor Socket: %d", socket_id);
2693 
2694 	return printed;
2695 }
2696 
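/*
 * Parse the --percentage option: "relative" computes percentages against
 * the filtered total, "absolute" against the total of all samples.
 */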
2697 int parse_filter_percentage(const struct option *opt __maybe_unused,
2698 			    const char *arg, int unset __maybe_unused)
2699 {
2700 	if (!strcmp(arg, "relative"))
2701 		symbol_conf.filter_relative = true;
2702 	else if (!strcmp(arg, "absolute"))
2703 		symbol_conf.filter_relative = false;
2704 	else {
2705 		pr_debug("Invalid percentage: %s\n", arg);
2706 		return -1;
2707 	}
2708 
2709 	return 0;
2710 }
2711 
2712 int perf_hist_config(const char *var, const char *value)
2713 {
2714 	if (!strcmp(var, "hist.percentage"))
2715 		return parse_filter_percentage(NULL, value, 0);
2716 
2717 	return 0;
2718 }
2719 
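/*
 * Initialize @hists: two input trees (so new entries can be added while
 * the other one is being collapsed), a collapsed tree and an output
 * tree, with @hpp_list providing the output format and sort order.
 */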
2720 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2721 {
2722 	memset(hists, 0, sizeof(*hists));
2723 	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2724 	hists->entries_in = &hists->entries_in_array[0];
2725 	hists->entries_collapsed = RB_ROOT_CACHED;
2726 	hists->entries = RB_ROOT_CACHED;
2727 	pthread_mutex_init(&hists->lock, NULL);
2728 	hists->socket_filter = -1;
2729 	hists->hpp_list = hpp_list;
2730 	INIT_LIST_HEAD(&hists->hpp_formats);
2731 	return 0;
2732 }
2733 
2734 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2735 {
2736 	struct rb_node *node;
2737 	struct hist_entry *he;
2738 
2739 	while (!RB_EMPTY_ROOT(&root->rb_root)) {
2740 		node = rb_first_cached(root);
2741 		rb_erase_cached(node, root);
2742 
2743 		he = rb_entry(node, struct hist_entry, rb_node_in);
2744 		hist_entry__delete(he);
2745 	}
2746 }
2747 
2748 static void hists__delete_all_entries(struct hists *hists)
2749 {
2750 	hists__delete_entries(hists);
2751 	hists__delete_remaining_entries(&hists->entries_in_array[0]);
2752 	hists__delete_remaining_entries(&hists->entries_in_array[1]);
2753 	hists__delete_remaining_entries(&hists->entries_collapsed);
2754 }
2755 
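/* Per-evsel destructor: free all entries and the dynamic hpp formats. */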
2756 static void hists_evsel__exit(struct evsel *evsel)
2757 {
2758 	struct hists *hists = evsel__hists(evsel);
2759 	struct perf_hpp_fmt *fmt, *pos;
2760 	struct perf_hpp_list_node *node, *tmp;
2761 
2762 	hists__delete_all_entries(hists);
2763 
2764 	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2765 		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2766 			list_del_init(&fmt->list);
2767 			free(fmt);
2768 		}
2769 		list_del_init(&node->list);
2770 		free(node);
2771 	}
2772 }
2773 
2774 static int hists_evsel__init(struct evsel *evsel)
2775 {
2776 	struct hists *hists = evsel__hists(evsel);
2777 
2778 	__hists__init(hists, &perf_hpp_list);
2779 	return 0;
2780 }
2781 
/*
 * hists_evsel__exit() frees the hist_entries stored in the rbtrees when
 * the evsel is destroyed, via the object config registered below.
 */
2786 
2787 int hists__init(void)
2788 {
2789 	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2790 					    hists_evsel__init,
2791 					    hists_evsel__exit);
2792 	if (err)
2793 		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2794 
2795 	return err;
2796 }
2797 
2798 void perf_hpp_list__init(struct perf_hpp_list *list)
2799 {
2800 	INIT_LIST_HEAD(&list->fields);
2801 	INIT_LIST_HEAD(&list->sorts);
2802 }
2803