/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_HIST_H
#define __PERF_HIST_H

#include <linux/rbtree.h>
#include <linux/types.h>
#include "callchain.h"
#include "color.h"
#include "events_stats.h"
#include "evsel.h"
#include "map_symbol.h"
#include "mem-events.h"
#include "mutex.h"
#include "sample.h"
#include "spark.h"
#include "stat.h"

struct addr_location;
struct mem_info;
struct kvm_info;
struct branch_info;
struct branch_stack;
struct block_info;
struct ui_progress;

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
	HIST_FILTER__GUEST,
	HIST_FILTER__HOST,
	HIST_FILTER__SOCKET,
	HIST_FILTER__C2C,
	HIST_FILTER__PARALLELISM,
};

typedef u16 filter_mask_t;

enum hist_column {
	HISTC_SYMBOL,
	HISTC_TIME,
	HISTC_DSO,
	HISTC_THREAD,
	HISTC_TGID,
	HISTC_COMM,
	HISTC_CGROUP_ID,
	HISTC_CGROUP,
	HISTC_PARENT,
	HISTC_PARALLELISM,
	HISTC_CPU,
	HISTC_SOCKET,
	HISTC_SRCLINE,
	HISTC_SRCFILE,
	HISTC_MISPREDICT,
	HISTC_IN_TX,
	HISTC_ABORT,
	HISTC_SYMBOL_FROM,
	HISTC_SYMBOL_TO,
	HISTC_DSO_FROM,
	HISTC_DSO_TO,
	HISTC_LOCAL_WEIGHT,
	HISTC_GLOBAL_WEIGHT,
	HISTC_CODE_PAGE_SIZE,
	HISTC_MEM_DADDR_SYMBOL,
	HISTC_MEM_DADDR_DSO,
	HISTC_MEM_PHYS_DADDR,
	HISTC_MEM_DATA_PAGE_SIZE,
	HISTC_MEM_LOCKED,
	HISTC_MEM_TLB,
	HISTC_MEM_LVL,
	HISTC_MEM_SNOOP,
	HISTC_MEM_DCACHELINE,
	HISTC_MEM_IADDR_SYMBOL,
	HISTC_TRANSACTION,
	HISTC_CYCLES,
	HISTC_SRCLINE_FROM,
	HISTC_SRCLINE_TO,
	HISTC_TRACE,
	HISTC_SYM_SIZE,
	HISTC_DSO_SIZE,
	HISTC_SYMBOL_IPC,
	HISTC_MEM_BLOCKED,
	HISTC_LOCAL_INS_LAT,
	HISTC_GLOBAL_INS_LAT,
	HISTC_LOCAL_P_STAGE_CYC,
	HISTC_GLOBAL_P_STAGE_CYC,
	HISTC_ADDR_FROM,
	HISTC_ADDR_TO,
	HISTC_ADDR,
	HISTC_SIMD,
	HISTC_TYPE,
	HISTC_TYPE_OFFSET,
	HISTC_SYMBOL_OFFSET,
	HISTC_TYPE_CACHELINE,
	HISTC_CALLCHAIN_BRANCH_PREDICTED,
	HISTC_CALLCHAIN_BRANCH_ABORT,
	HISTC_CALLCHAIN_BRANCH_CYCLES,
	HISTC_NR_COLS, /* Last entry */
};

struct thread;
struct dso;

#define MEM_STAT_LEN 8

struct he_mem_stat {
	/* meaning of entries depends on enum mem_stat_type */
	u64 entries[MEM_STAT_LEN];
};

struct hists {
	struct rb_root_cached entries_in_array[2];
	struct rb_root_cached *entries_in;
	struct rb_root_cached entries;
	struct rb_root_cached entries_collapsed;
	u64 nr_entries;
	u64 nr_non_filtered_entries;
	u64 callchain_period;
	u64 callchain_non_filtered_period;
	u64 callchain_latency;
	u64 callchain_non_filtered_latency;
	struct thread *thread_filter;
	const struct dso *dso_filter;
	const char *uid_filter_str;
	const char *symbol_filter_str;
	unsigned long *parallelism_filter;
	struct mutex lock;
	struct hists_stats stats;
	u64 event_stream;
	u16 col_len[HISTC_NR_COLS];
	bool has_callchains;
	int socket_filter;
	struct perf_hpp_list *hpp_list;
	struct list_head hpp_formats;
	int nr_hpp_node;
	int nr_mem_stats;
	enum mem_stat_type *mem_stat_types;
	struct he_mem_stat *mem_stat_total;
};

#define hists__has(__h, __f) (__h)->hpp_list->__f
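
/*
 * Illustrative usage (not part of the original header): the __f argument
 * names one of the flag fields of struct perf_hpp_list further down, e.g.
 *
 *	if (hists__has(hists, sym))
 *		... the symbol sort key is in use ...
 */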

struct hist_entry_iter;

struct hist_iter_ops {
	int (*prepare_entry)(struct hist_entry_iter *, struct addr_location *);
	int (*add_single_entry)(struct hist_entry_iter *, struct addr_location *);
	int (*next_entry)(struct hist_entry_iter *, struct addr_location *);
	int (*add_next_entry)(struct hist_entry_iter *, struct addr_location *);
	int (*finish_entry)(struct hist_entry_iter *, struct addr_location *);
};

struct hist_entry_iter {
	int total;
	int curr;

	struct evsel *evsel;
	struct perf_sample *sample;
	struct hist_entry *he;
	struct symbol *parent;

	struct mem_info *mi;
	struct branch_info *bi;
	struct hist_entry **he_cache;

	const struct hist_iter_ops *ops;
	/* user-defined callback function (optional) */
	int (*add_entry_cb)(struct hist_entry_iter *iter,
			    struct addr_location *al, bool single, void *arg);
	bool hide_unresolved;
};

extern const struct hist_iter_ops hist_iter_normal;
extern const struct hist_iter_ops hist_iter_branch;
extern const struct hist_iter_ops hist_iter_mem;
extern const struct hist_iter_ops hist_iter_cumulative;

struct res_sample {
	u64 time;
	int cpu;
	int tid;
};

struct he_stat {
	u64 period;
	/*
	 * Period re-scaled from CPU time to wall-clock time (divided by the
	 * parallelism at the time of the sample). This represents effect of
	 * the event on latency rather than CPU consumption.
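	 * For example, a sample with period P taken while the workload runs
	 * 4 threads in parallel contributes P/4 here.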
	 */
	u64 latency;
	u64 period_sys;
	u64 period_us;
	u64 period_guest_sys;
	u64 period_guest_us;
	u64 weight1;
	u64 weight2;
	u64 weight3;
	u32 nr_events;
};

struct namespace_id {
	u64 dev;
	u64 ino;
};

struct hist_entry_diff {
	bool computed;
	union {
		/* PERF_HPP__DELTA */
		double period_ratio_delta;

		/* PERF_HPP__RATIO */
		double period_ratio;

		/* HISTC_WEIGHTED_DIFF */
		s64 wdiff;

		/* PERF_HPP_DIFF__CYCLES */
		s64 cycles;
	};
	struct stats stats;
	unsigned long svals[NUM_SPARKS];
};

struct hist_entry_ops {
	void *(*new)(size_t size);
	void (*free)(void *ptr);
};

/**
 * struct hist_entry - histogram entry
 *
 * @row_offset - offset from the first callchain expanded to appear on screen
 * @nr_rows - rows expanded in callchain, recalculated on folding/unfolding
 */
struct hist_entry {
	struct rb_node rb_node_in;
	struct rb_node rb_node;
	union {
		struct list_head node;
		struct list_head head;
	} pairs;
	struct he_stat stat;
	struct he_stat *stat_acc;
	struct he_mem_stat *mem_stat;
	struct map_symbol ms;
	struct thread *thread;
	struct comm *comm;
	struct namespace_id cgroup_id;
	u64 cgroup;
	u64 ip;
	u64 transaction;
	u64 code_page_size;
	u64 weight;
	u64 ins_lat;
	/** @weight3: On x86 holds retire_lat, on powerpc holds p_stage_cyc. */
	u64 weight3;
	s32 socket;
	s32 cpu;
	int parallelism;
	int mem_type_off;
	u8 cpumode;
	u8 depth;
	struct simd_flags simd_flags;

	/* We are added by hists__add_dummy_entry. */
	bool dummy;
	bool leaf;

	char level;
	filter_mask_t filtered;

	u16 callchain_size;
	union {
		/*
		 * Since perf diff only supports the stdio output, TUI
		 * fields are only accessed from perf report (or perf
		 * top). So make it a union to reduce memory usage.
		 */
		struct hist_entry_diff diff;
		struct /* for TUI */ {
			u16 row_offset;
			u16 nr_rows;
			bool init_have_children;
			bool unfolded;
			bool has_children;
			bool has_no_entry;
		};
	};
	char *srcline;
	char *srcfile;
	struct symbol *parent;
	struct branch_info *branch_info;
	long time;
	struct hists *hists;
	struct mem_info *mem_info;
	struct block_info *block_info;
	struct kvm_info *kvm_info;
	void *raw_data;
	u32 raw_size;
	int num_res;
	struct res_sample *res_samples;
	void *trace_output;
	struct perf_hpp_list *hpp_list;
	struct hist_entry *parent_he;
	struct hist_entry_ops *ops;
	struct annotated_data_type *mem_type;
	union {
		/* this is for hierarchical entry structure */
		struct {
			struct rb_root_cached hroot_in;
			struct rb_root_cached hroot_out;
		};				/* non-leaf entries */
		struct rb_root sorted_chain;	/* leaf entry has callchains */
	};
	struct callchain_root callchain[0]; /* must be last member */
};

static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
{
	return he->callchain_size != 0;
}

static inline bool hist_entry__has_pairs(struct hist_entry *he)
{
	return !list_empty(&he->pairs.node);
}

static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he)
{
	if (hist_entry__has_pairs(he))
		return list_entry(he->pairs.node.next, struct hist_entry, pairs.node);
	return NULL;
}

static inline void hist_entry__add_pair(struct hist_entry *pair,
					struct hist_entry *he)
{
	list_add_tail(&pair->pairs.node, &he->pairs.head);
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct kvm_info *ki,
				    struct perf_sample *sample,
				    bool sample_self);

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct kvm_info *ki,
					struct perf_sample *sample,
					bool sample_self);

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *bi);

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg);
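
/*
 * Illustrative sketch (not part of the original header, local variable names
 * are made up): a sample-processing callback typically picks one of the
 * hist_iter_* ops above and drives the iterator roughly like this:
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	if (symbol_conf.cumulate_callchain)
 *		iter.ops = &hist_iter_cumulative;
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, cb_arg);
 */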

struct perf_hpp;
struct perf_hpp_fmt;

int hist_entry__transaction_len(void);
int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
			      struct hists *hists);
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed);
int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size,
			     unsigned int width);
void hist_entry__delete(struct hist_entry *he);

typedef int (*hists__resort_cb_t)(struct hist_entry *he, void *arg);

void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
			     hists__resort_cb_t cb, void *cb_arg);
void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog);
void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb);
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
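
/*
 * Illustrative flow (not part of the original header, local variable names
 * are made up): once all samples have been added, a tool typically collapses
 * duplicate entries and then sorts the result for output, e.g.:
 *
 *	hists__collapse_resort(hists, &prog);
 *	evsel__output_resort(evsel, &prog);
 *	hists__fprintf(hists, true, 0, 0, 0, stdout, false);
 *
 * hists__output_resort() is the variant that takes the struct hists directly.
 */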

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
void hists__delete_entries(struct hists *hists);
void hists__output_recalc_col_len(struct hists *hists, int max_rows);

struct hist_entry *hists__get_entry(struct hists *hists, int idx);

u64 hists__total_period(struct hists *hists);
u64 hists__total_latency(struct hists *hists);
void hists__reset_stats(struct hists *hists);
void hists__inc_stats(struct hists *hists, struct hist_entry *h);
void hists__inc_nr_events(struct hists *hists);
void hists__inc_nr_samples(struct hists *hists, bool filtered);
void hists__inc_nr_lost_samples(struct hists *hists, u32 lost);
void hists__inc_nr_dropped_samples(struct hists *hists, u32 lost);

size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool ignore_callchains);
size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp);

void hists__filter_by_dso(struct hists *hists);
void hists__filter_by_thread(struct hists *hists);
void hists__filter_by_symbol(struct hists *hists);
void hists__filter_by_socket(struct hists *hists);
void hists__filter_by_parallelism(struct hists *hists);

static inline bool hists__has_filter(struct hists *hists)
{
	return hists->thread_filter || hists->dso_filter ||
	       hists->symbol_filter_str || (hists->socket_filter > -1) ||
	       hists->parallelism_filter;
}

u16 hists__col_len(struct hists *hists, enum hist_column col);
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len);
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len);
void hists__reset_col_len(struct hists *hists);
void hists__calc_col_len(struct hists *hists, struct hist_entry *he);

void hists__match(struct hists *leader, struct hists *other);
int hists__link(struct hists *leader, struct hists *other);
int hists__unlink(struct hists *hists);
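
/*
 * Illustrative sketch (not part of the original header, local variable names
 * are made up): pairing two hists for a perf-diff style comparison roughly
 * means matching "other" against "leader", linking the leftovers and then
 * walking the pairs:
 *
 *	hists__match(leader_hists, other_hists);
 *	hists__link(leader_hists, other_hists);
 *	...
 *	pair = hist_entry__next_pair(he);
 */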

static inline float hist_entry__get_percent_limit(struct hist_entry *he)
{
	u64 period = he->stat.period;
	u64 total_period = hists__total_period(he->hists);

	if (unlikely(total_period == 0))
		return 0;

	if (symbol_conf.cumulate_callchain)
		period = he->stat_acc->period;

	return period * 100.0 / total_period;
}

struct hists_evsel {
	struct evsel evsel;
	struct hists hists;
};

static inline struct evsel *hists_to_evsel(struct hists *hists)
{
	struct hists_evsel *hevsel = container_of(hists, struct hists_evsel, hists);
	return &hevsel->evsel;
}

static inline struct hists *evsel__hists(struct evsel *evsel)
{
	struct hists_evsel *hevsel = (struct hists_evsel *)evsel;
	return &hevsel->hists;
}

static __pure inline bool hists__has_callchains(struct hists *hists)
{
	return hists->has_callchains;
}

int hists__init(void);
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);

struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists);

struct perf_hpp {
	char *buf;
	size_t size;
	const char *sep;
	void *ptr;
	bool skip;
};

typedef int64_t (*perf_hpp_fmt_cmp_t)(
	struct perf_hpp_fmt *, struct hist_entry *, struct hist_entry *);

struct perf_hpp_fmt {
	const char *name;
	int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		      struct hists *hists, int line, int *span);
	int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		     struct hists *hists);
	void (*init)(struct perf_hpp_fmt *fmt, struct hist_entry *he);
	int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		     struct hist_entry *he);
	int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		     struct hist_entry *he);
	perf_hpp_fmt_cmp_t cmp;
	perf_hpp_fmt_cmp_t collapse;
	perf_hpp_fmt_cmp_t sort;
	bool (*equal)(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b);
	void (*free)(struct perf_hpp_fmt *fmt);

	struct list_head list;
	struct list_head sort_list;
	bool elide;
	int len;
	int user_len;
	int idx;
	int level;
};

struct perf_hpp_list {
	struct list_head fields;
	struct list_head sorts;

	int nr_header_lines;
	int need_collapse;
	int parent;
	int sym;
	int dso;
	int socket;
	int thread;
	int comm;
};

extern struct perf_hpp_list perf_hpp_list;

struct perf_hpp_list_node {
	struct list_head list;
	struct perf_hpp_list hpp;
	int level;
	bool skip;
};

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format);
void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format);
void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format);

static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
{
	perf_hpp_list__column_register(&perf_hpp_list, format);
}

static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
{
	perf_hpp_list__register_sort_field(&perf_hpp_list, format);
}

static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
{
	perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
}

#define perf_hpp_list__for_each_format(_list, format) \
	list_for_each_entry(format, &(_list)->fields, list)

#define perf_hpp_list__for_each_format_safe(_list, format, tmp) \
	list_for_each_entry_safe(format, tmp, &(_list)->fields, list)

#define perf_hpp_list__for_each_sort_list(_list, format) \
	list_for_each_entry(format, &(_list)->sorts, sort_list)

#define perf_hpp_list__for_each_sort_list_safe(_list, format, tmp) \
	list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list)

#define hists__for_each_format(hists, format) \
	perf_hpp_list__for_each_format((hists)->hpp_list, format)

#define hists__for_each_sort_list(hists, format) \
	perf_hpp_list__for_each_sort_list((hists)->hpp_list, format)
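
/*
 * Illustrative sketch (not part of the original header, local variable names
 * are made up): output code walks the configured columns with the iterators
 * above, skipping elided or undefined ones, roughly like:
 *
 *	struct perf_hpp_fmt *fmt;
 *
 *	hists__for_each_format(hists, fmt) {
 *		if (perf_hpp__should_skip(fmt, hists))
 *			continue;
 *		fmt->entry(fmt, &hpp, he);
 *	}
 *
 * perf_hpp__should_skip() is declared further down in this header.
 */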

extern struct perf_hpp_fmt perf_hpp__format[];

enum {
	/* Matches perf_hpp__format array. */
	PERF_HPP__OVERHEAD,
	PERF_HPP__LATENCY,
	PERF_HPP__OVERHEAD_SYS,
	PERF_HPP__OVERHEAD_US,
	PERF_HPP__OVERHEAD_GUEST_SYS,
	PERF_HPP__OVERHEAD_GUEST_US,
	PERF_HPP__OVERHEAD_ACC,
	PERF_HPP__LATENCY_ACC,
	PERF_HPP__SAMPLES,
	PERF_HPP__PERIOD,
	PERF_HPP__WEIGHT1,
	PERF_HPP__WEIGHT2,
	PERF_HPP__WEIGHT3,
	PERF_HPP__MEM_STAT_OP,
	PERF_HPP__MEM_STAT_CACHE,
	PERF_HPP__MEM_STAT_MEMORY,
	PERF_HPP__MEM_STAT_SNOOP,
	PERF_HPP__MEM_STAT_DTLB,

	PERF_HPP__MAX_INDEX
};

void perf_hpp__init(void);
void perf_hpp__cancel_cumulate(struct evlist *evlist);
void perf_hpp__cancel_latency(struct evlist *evlist);
void perf_hpp__setup_output_field(struct perf_hpp_list *list);
void perf_hpp__reset_output_field(struct perf_hpp_list *list);
void perf_hpp__append_sort_keys(struct perf_hpp_list *list);
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist);
int perf_hpp__alloc_mem_stats(struct perf_hpp_list *list,
			      struct evlist *evlist);

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format);
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *format);
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists);
bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt);
bool perf_hpp__is_srcline_entry(struct perf_hpp_fmt *fmt);
bool perf_hpp__is_srcfile_entry(struct perf_hpp_fmt *fmt);
bool perf_hpp__is_thread_entry(struct perf_hpp_fmt *fmt);
bool perf_hpp__is_comm_entry(struct perf_hpp_fmt *fmt);
bool perf_hpp__is_dso_entry(struct perf_hpp_fmt *fmt);
bool perf_hpp__is_sym_entry(struct perf_hpp_fmt *fmt);
bool perf_hpp__is_parallelism_entry(struct perf_hpp_fmt *fmt);

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt);

int hist_entry__filter(struct hist_entry *he, int type, const void *arg);

static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format,
					 struct hists *hists)
{
	if (format->elide)
		return true;

	if (perf_hpp__is_dynamic_entry(format) &&
	    !perf_hpp__defined_dynamic_entry(format, hists))
		return true;

	return false;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists);
void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists);
void perf_hpp__set_user_width(const char *width_list_str);
void hists__reset_column_width(struct hists *hists);

enum perf_hpp_fmt_type {
	PERF_HPP_FMT_TYPE__RAW,
	PERF_HPP_FMT_TYPE__PERCENT,
	PERF_HPP_FMT_TYPE__LATENCY,
	PERF_HPP_FMT_TYPE__AVERAGE,
};

typedef u64 (*hpp_field_fn)(struct hist_entry *he);
typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front);
typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...);

int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn,
	     enum perf_hpp_fmt_type fmtype);
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn,
		 enum perf_hpp_fmt_type fmtype);
int hpp__fmt_mem_stat(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		      struct hist_entry *he, enum mem_stat_type mst,
		      const char *fmtstr, hpp_snprint_fn print_fn);

static inline void advance_hpp(struct perf_hpp *hpp, int inc)
{
	hpp->buf += inc;
	hpp->size -= inc;
}
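
/*
 * Illustrative sketch (not part of the original header, local variable names
 * are made up): format callbacks print into hpp->buf and then advance past
 * what they wrote, e.g.:
 *
 *	ret = scnprintf(hpp->buf, hpp->size, "%*s", len, str);
 *	advance_hpp(hpp, ret);
 */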

static inline size_t perf_hpp__use_color(void)
{
	return !symbol_conf.field_sep;
}

static inline size_t perf_hpp__color_overhead(void)
{
	return perf_hpp__use_color() ?
	       (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX
	       : 0;
}

struct evlist;

struct hist_browser_timer {
	void (*timer)(void *arg);
	void *arg;
	int refresh;
};

enum rstype {
	A_NORMAL,
	A_ASM,
	A_SOURCE
};

struct block_hist {
	struct hists block_hists;
	struct perf_hpp_list block_list;
	struct perf_hpp_fmt block_fmt;
	int block_idx;
	bool valid;
	struct hist_entry he;
};

#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
void attr_to_script(char *buf, struct perf_event_attr *attr);

int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
			     struct hist_browser_timer *hbt);

int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
			     struct hist_browser_timer *hbt);

int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt,
			     float min_pcnt, struct perf_env *env, bool warn_lost_event);

int script_browse(const char *script_opt, struct evsel *evsel);

void run_script(char *cmd);
int res_sample_browse(struct res_sample *res_samples, int num_res,
		      struct evsel *evsel, enum rstype rstype);
void res_sample_init(void);

int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
			   float min_percent, struct perf_env *env);
#else
static inline
int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused,
			     const char *help __maybe_unused,
			     struct hist_browser_timer *hbt __maybe_unused,
			     float min_pcnt __maybe_unused,
			     struct perf_env *env __maybe_unused,
			     bool warn_lost_event __maybe_unused)
{
	return 0;
}

static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
					   struct evsel *evsel __maybe_unused,
					   struct hist_browser_timer *hbt __maybe_unused)
{
	return 0;
}

static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
					   struct evsel *evsel __maybe_unused,
					   struct hist_browser_timer *hbt __maybe_unused)
{
	return 0;
}

static inline int script_browse(const char *script_opt __maybe_unused,
				struct evsel *evsel __maybe_unused)
{
	return 0;
}

static inline int res_sample_browse(struct res_sample *res_samples __maybe_unused,
				    int num_res __maybe_unused,
				    struct evsel *evsel __maybe_unused,
				    enum rstype rstype __maybe_unused)
{
	return 0;
}

static inline void res_sample_init(void) {}

static inline int block_hists_tui_browse(struct block_hist *bh __maybe_unused,
					 struct evsel *evsel __maybe_unused,
					 float min_percent __maybe_unused,
					 struct perf_env *env __maybe_unused)
{
	return 0;
}

#define K_LEFT -1000
#define K_RIGHT -2000
#define K_SWITCH_INPUT_DATA -3000
#define K_RELOAD -4000
#endif

unsigned int hists__sort_list_width(struct hists *hists);
unsigned int hists__overhead_width(struct hists *hists);

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
			  u64 *total_cycles, struct evsel *evsel);

struct option;
int parse_filter_percentage(const struct option *opt, const char *arg, int unset);
int perf_hist_config(const char *var, const char *value);

void perf_hpp_list__init(struct perf_hpp_list *list);

enum hierarchy_move_dir {
	HMD_NORMAL,
	HMD_FORCE_SIBLING,
	HMD_FORCE_CHILD,
};

struct rb_node *rb_hierarchy_last(struct rb_node *node);
struct rb_node *__rb_hierarchy_next(struct rb_node *node,
				    enum hierarchy_move_dir hmd);
struct rb_node *rb_hierarchy_prev(struct rb_node *node);

static inline struct rb_node *rb_hierarchy_next(struct rb_node *node)
{
	return __rb_hierarchy_next(node, HMD_NORMAL);
}

#define HIERARCHY_INDENT 3

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit);
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...);
int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...);
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list);
int hists__fprintf_headers(struct hists *hists, FILE *fp);
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq);

static inline int hists__scnprintf_title(struct hists *hists, char *bf, size_t size)
{
	return __hists__scnprintf_title(hists, bf, size, true);
}

#endif /* __PERF_HIST_H */