1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <math.h>
5 #include <stdlib.h>
6 #include <string.h>
7 #include <linux/compiler.h>
8
9 #include "../util/callchain.h"
10 #include "../util/debug.h"
11 #include "../util/hist.h"
12 #include "../util/sort.h"
13 #include "../util/evsel.h"
14 #include "../util/evlist.h"
15 #include "../util/mem-events.h"
16 #include "../util/string2.h"
17 #include "../util/thread.h"
18 #include "../util/util.h"
19
20 /* hist period print (hpp) functions */
21
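/*
 * Call the given snprintf-style print function and advance the hpp
 * buffer by the number of characters printed, so that consecutive
 * calls append to the same buffer.
 */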
22 #define hpp__call_print_fn(hpp, fn, fmt, ...) \
23 ({ \
24 int __ret = fn(hpp, fmt, ##__VA_ARGS__); \
25 advance_hpp(hpp, __ret); \
26 __ret; \
27 })
28
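/*
 * Print a single value for one hists according to @fmtype: as a
 * percentage of the total period or latency, as an average per
 * sample, or as a raw number.
 */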
29 static int __hpp__fmt_print(struct perf_hpp *hpp, struct hists *hists, u64 val,
30 int nr_samples, const char *fmt, int len,
31 hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
32 {
33 if (fmtype == PERF_HPP_FMT_TYPE__PERCENT || fmtype == PERF_HPP_FMT_TYPE__LATENCY) {
34 double percent = 0.0;
35 u64 total = fmtype == PERF_HPP_FMT_TYPE__PERCENT ? hists__total_period(hists) :
36 hists__total_latency(hists);
37
38 if (total)
39 percent = 100.0 * val / total;
40
41 return hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
42 }
43
44 if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) {
45 double avg = nr_samples ? (1.0 * val / nr_samples) : 0;
46
47 return hpp__call_print_fn(hpp, print_fn, fmt, len, avg);
48 }
49
50 return hpp__call_print_fn(hpp, print_fn, fmt, len, val);
51 }
52
53 struct hpp_fmt_value {
54 struct hists *hists;
55 u64 val;
56 int samples;
57 };
58
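/*
 * Gather the field value for @he and, for event groups, the values of
 * its pair entries from the other group members, then print one
 * column per member (skipping members without samples if requested).
 */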
59 static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
60 hpp_field_fn get_field, const char *fmt, int len,
61 hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
62 {
63 int ret = 0;
64 struct hists *hists = he->hists;
65 struct evsel *evsel = hists_to_evsel(hists);
66 struct evsel *pos;
67 char *buf = hpp->buf;
68 size_t size = hpp->size;
69 int i = 0, nr_members = 1;
70 struct hpp_fmt_value *values;
71
72 if (evsel__is_group_event(evsel))
73 nr_members = evsel->core.nr_members;
74
75 values = calloc(nr_members, sizeof(*values));
76 if (values == NULL)
77 return 0;
78
79 values[0].hists = evsel__hists(evsel);
80 values[0].val = get_field(he);
81 values[0].samples = he->stat.nr_events;
82
83 if (evsel__is_group_event(evsel)) {
84 struct hist_entry *pair;
85
86 for_each_group_member(pos, evsel)
87 values[++i].hists = evsel__hists(pos);
88
89 list_for_each_entry(pair, &he->pairs.head, pairs.node) {
90 for (i = 0; i < nr_members; i++) {
91 if (values[i].hists != pair->hists)
92 continue;
93
94 values[i].val = get_field(pair);
95 values[i].samples = pair->stat.nr_events;
96 break;
97 }
98 }
99 }
100
101 for (i = 0; i < nr_members; i++) {
102 if (symbol_conf.skip_empty &&
103 values[i].hists->stats.nr_samples == 0)
104 continue;
105
106 ret += __hpp__fmt_print(hpp, values[i].hists, values[i].val,
107 values[i].samples, fmt, len,
108 print_fn, fmtype);
109 }
110
111 free(values);
112
113 /*
114 * Restore the original buf and size as that is where the caller
115 * expects the result to be saved.
116 */
117 hpp->buf = buf;
118 hpp->size = size;
119
120 return ret;
121 }
122
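/*
 * Common helper for the entry/color callbacks: derive the column
 * width from the format (or a user override) and the header name,
 * reserving room for the " %" suffix of percent and latency columns,
 * then print the value.  With a field separator, fixed alignment is
 * not needed.
 */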
123 int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
124 struct hist_entry *he, hpp_field_fn get_field,
125 const char *fmtstr, hpp_snprint_fn print_fn,
126 enum perf_hpp_fmt_type fmtype)
127 {
128 int len = max(fmt->user_len ?: fmt->len, (int)strlen(fmt->name));
129
130 if (symbol_conf.field_sep) {
131 return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
132 print_fn, fmtype);
133 }
134
135 if (fmtype == PERF_HPP_FMT_TYPE__PERCENT || fmtype == PERF_HPP_FMT_TYPE__LATENCY)
136 len -= 2; /* 2 for a space and a % sign */
137 else
138 len -= 1;
139
140 return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmtype);
141 }
142
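/*
 * Like hpp__fmt() but for accumulated (children) values; prints "N/A"
 * when callchain accumulation is disabled.
 */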
143 int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
144 struct hist_entry *he, hpp_field_fn get_field,
145 const char *fmtstr, hpp_snprint_fn print_fn,
146 enum perf_hpp_fmt_type fmtype)
147 {
148 if (!symbol_conf.cumulate_callchain) {
149 int len = fmt->user_len ?: fmt->len;
150 return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
151 }
152
153 return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmtype);
154 }
155
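/*
 * Print the breakdown of a memory statistics column for @he: each
 * used sub-column shows the entry's value as a percentage of the
 * hists-wide total for that statistic.
 */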
156 int hpp__fmt_mem_stat(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
157 struct hist_entry *he, enum mem_stat_type mst,
158 const char *fmtstr, hpp_snprint_fn print_fn)
159 {
160 struct hists *hists = he->hists;
161 int mem_stat_idx = -1;
162 char *buf = hpp->buf;
163 size_t size = hpp->size;
164 u64 total = 0;
165 int ret = 0;
166
167 for (int i = 0; i < hists->nr_mem_stats; i++) {
168 if (hists->mem_stat_types[i] == mst) {
169 mem_stat_idx = i;
170 break;
171 }
172 }
173 assert(mem_stat_idx != -1);
174
175 for (int i = 0; i < MEM_STAT_LEN; i++)
176 total += hists->mem_stat_total[mem_stat_idx].entries[i];
177 assert(total != 0);
178
179 for (int i = 0; i < MEM_STAT_LEN; i++) {
180 u64 val = he->mem_stat[mem_stat_idx].entries[i];
181
182 if (hists->mem_stat_total[mem_stat_idx].entries[i] == 0)
183 continue;
184
185 ret += hpp__call_print_fn(hpp, print_fn, fmtstr, 100.0 * val / total);
186 }
187
188 /*
189 * Restore the original buf and size as that is where the caller
190 * expects the result to be saved.
191 */
192 hpp->buf = buf;
193 hpp->size = size;
194
195 return ret;
196 }
197
198 static int field_cmp(u64 field_a, u64 field_b)
199 {
200 if (field_a > field_b)
201 return 1;
202 if (field_a < field_b)
203 return -1;
204 return 0;
205 }
206
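/*
 * Build arrays of the field values of the pair entries of @a and @b,
 * indexed by evsel group index, for sorting group events.  The caller
 * frees the arrays.
 */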
207 static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
208 hpp_field_fn get_field, int nr_members,
209 u64 **fields_a, u64 **fields_b)
210 {
211 u64 *fa = calloc(nr_members, sizeof(*fa)),
212 *fb = calloc(nr_members, sizeof(*fb));
213 struct hist_entry *pair;
214
215 if (!fa || !fb)
216 goto out_free;
217
218 list_for_each_entry(pair, &a->pairs.head, pairs.node) {
219 struct evsel *evsel = hists_to_evsel(pair->hists);
220 fa[evsel__group_idx(evsel)] = get_field(pair);
221 }
222
223 list_for_each_entry(pair, &b->pairs.head, pairs.node) {
224 struct evsel *evsel = hists_to_evsel(pair->hists);
225 fb[evsel__group_idx(evsel)] = get_field(pair);
226 }
227
228 *fields_a = fa;
229 *fields_b = fb;
230 return 0;
231 out_free:
232 free(fa);
233 free(fb);
234 *fields_a = *fields_b = NULL;
235 return -1;
236 }
237
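/*
 * Sort group events by the value of the group member at @idx; on a
 * tie, fall back to comparing the remaining members.
 */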
238 static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
239 hpp_field_fn get_field, int idx)
240 {
241 struct evsel *evsel = hists_to_evsel(a->hists);
242 u64 *fields_a, *fields_b;
243 int cmp, nr_members, ret, i;
244
245 cmp = field_cmp(get_field(a), get_field(b));
246 if (!evsel__is_group_event(evsel))
247 return cmp;
248
249 nr_members = evsel->core.nr_members;
250 if (idx < 1 || idx >= nr_members)
251 return cmp;
252
253 ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
254 if (ret) {
255 ret = cmp;
256 goto out;
257 }
258
259 ret = field_cmp(fields_a[idx], fields_b[idx]);
260 if (ret)
261 goto out;
262
263 for (i = 1; i < nr_members; i++) {
264 if (i != idx) {
265 ret = field_cmp(fields_a[i], fields_b[i]);
266 if (ret)
267 goto out;
268 }
269 }
270
271 out:
272 free(fields_a);
273 free(fields_b);
274
275 return ret;
276 }
277
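/*
 * Default sort: compare by the leader's field value; for group
 * events, break ties with the other members' values.
 */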
278 static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
279 hpp_field_fn get_field)
280 {
281 s64 ret;
282 int i, nr_members;
283 struct evsel *evsel;
284 u64 *fields_a, *fields_b;
285
286 if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
287 return __hpp__group_sort_idx(a, b, get_field,
288 symbol_conf.group_sort_idx);
289 }
290
291 ret = field_cmp(get_field(a), get_field(b));
292 if (ret || !symbol_conf.event_group)
293 return ret;
294
295 evsel = hists_to_evsel(a->hists);
296 if (!evsel__is_group_event(evsel))
297 return ret;
298
299 nr_members = evsel->core.nr_members;
300 i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
301 if (i)
302 goto out;
303
304 for (i = 1; i < nr_members; i++) {
305 ret = field_cmp(fields_a[i], fields_b[i]);
306 if (ret)
307 break;
308 }
309
310 out:
311 free(fields_a);
312 free(fields_b);
313
314 return ret;
315 }
316
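/*
 * Sort by the accumulated (children) value; entries of the same
 * thread with equal values are further ordered by callchain depth so
 * that callers end up above callees.
 */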
317 static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
318 hpp_field_fn get_field)
319 {
320 s64 ret = 0;
321
322 if (symbol_conf.cumulate_callchain) {
323 /*
324 * Put caller above callee when they have equal period.
325 */
326 ret = field_cmp(get_field(a), get_field(b));
327 if (ret)
328 return ret;
329
330 if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
331 (b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
332 !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
333 return 0;
334
335 ret = b->callchain->max_depth - a->callchain->max_depth;
336 if (callchain_param.order == ORDER_CALLER)
337 ret = -ret;
338 }
339 return ret;
340 }
341
342 static bool perf_hpp__is_mem_stat_entry(struct perf_hpp_fmt *fmt);
343
344 static enum mem_stat_type hpp__mem_stat_type(struct perf_hpp_fmt *fmt)
345 {
346 if (!perf_hpp__is_mem_stat_entry(fmt))
347 return -1;
348
349 switch (fmt->idx) {
350 case PERF_HPP__MEM_STAT_OP:
351 return PERF_MEM_STAT_OP;
352 case PERF_HPP__MEM_STAT_CACHE:
353 return PERF_MEM_STAT_CACHE;
354 case PERF_HPP__MEM_STAT_MEMORY:
355 return PERF_MEM_STAT_MEMORY;
356 case PERF_HPP__MEM_STAT_SNOOP:
357 return PERF_MEM_STAT_SNOOP;
358 case PERF_HPP__MEM_STAT_DTLB:
359 return PERF_MEM_STAT_DTLB;
360 default:
361 break;
362 }
363 pr_debug("Should not reach here\n");
364 return -1;
365 }
366
367 static int64_t hpp__sort_mem_stat(struct perf_hpp_fmt *fmt __maybe_unused,
368 struct hist_entry *a, struct hist_entry *b)
369 {
370 return a->stat.period - b->stat.period;
371 }
372
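/*
 * Column width: the format (or user supplied) width, widened to hold
 * one sub-column per displayed group member and the header name.
 */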
373 static int hpp__width_fn(struct perf_hpp_fmt *fmt,
374 struct perf_hpp *hpp __maybe_unused,
375 struct hists *hists)
376 {
377 int len = fmt->user_len ?: fmt->len;
378 struct evsel *evsel = hists_to_evsel(hists);
379
380 if (symbol_conf.event_group) {
381 int nr = 0;
382 struct evsel *pos;
383
384 for_each_group_evsel(pos, evsel) {
385 if (!symbol_conf.skip_empty ||
386 evsel__hists(pos)->stats.nr_samples)
387 nr++;
388 }
389
390 len = max(len, nr * fmt->len);
391 }
392
393 if (len < (int)strlen(fmt->name))
394 len = strlen(fmt->name);
395
396 return len;
397 }
398
399 static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
400 struct hists *hists, int line,
401 int *span __maybe_unused)
402 {
403 int len = hpp__width_fn(fmt, hpp, hists);
404 const char *hdr = "";
405
406 if (line == hists->hpp_list->nr_header_lines - 1)
407 hdr = fmt->name;
408
409 return scnprintf(hpp->buf, hpp->size, "%*s", len, hdr);
410 }
411
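/*
 * Memory statistics columns use two header lines: the first shows the
 * column name centered over the used sub-columns between dotted
 * rules, the second shows the individual sub-column names.
 */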
412 static int hpp__header_mem_stat_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
413 struct hists *hists, int line,
414 int *span __maybe_unused)
415 {
416 char *buf = hpp->buf;
417 int ret = 0;
418 int len;
419 enum mem_stat_type mst = hpp__mem_stat_type(fmt);
420 int mem_stat_idx = -1;
421
422 for (int i = 0; i < hists->nr_mem_stats; i++) {
423 if (hists->mem_stat_types[i] == mst) {
424 mem_stat_idx = i;
425 break;
426 }
427 }
428 assert(mem_stat_idx != -1);
429
430 if (line == 0) {
431 int left, right;
432
433 len = 0;
434 /* update fmt->len for actually used columns only */
435 for (int i = 0; i < MEM_STAT_LEN; i++) {
436 if (hists->mem_stat_total[mem_stat_idx].entries[i])
437 len += MEM_STAT_PRINT_LEN;
438 }
439 fmt->len = len;
440
441 /* print the header directly if there is only a single column */
442 if (len == MEM_STAT_PRINT_LEN)
443 return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
444
445 left = (len - strlen(fmt->name)) / 2 - 1;
446 right = len - left - strlen(fmt->name) - 2;
447
448 if (left < 0)
449 left = 0;
450 if (right < 0)
451 right = 0;
452
453 return scnprintf(hpp->buf, hpp->size, "%.*s %s %.*s",
454 left, graph_dotted_line, fmt->name, right, graph_dotted_line);
455 }
456
457
458 len = hpp->size;
459 for (int i = 0; i < MEM_STAT_LEN; i++) {
460 int printed;
461
462 if (hists->mem_stat_total[mem_stat_idx].entries[i] == 0)
463 continue;
464
465 printed = scnprintf(buf, len, "%*s", MEM_STAT_PRINT_LEN,
466 mem_stat_name(mst, i));
467 ret += printed;
468 buf += printed;
469 len -= printed;
470 }
471 return ret;
472 }
473
474 int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
475 {
476 va_list args;
477 ssize_t ssize = hpp->size;
478 double percent;
479 int ret, len;
480
481 va_start(args, fmt);
482 len = va_arg(args, int);
483 percent = va_arg(args, double);
484 ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
485 va_end(args);
486
487 return (ret >= ssize) ? (ssize - 1) : ret;
488 }
489
490 static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
491 {
492 va_list args;
493 ssize_t ssize = hpp->size;
494 int ret;
495
496 va_start(args, fmt);
497 ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
498 va_end(args);
499
500 return (ret >= ssize) ? (ssize - 1) : ret;
501 }
502
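/*
 * The macros below generate the per-column color, entry and sort
 * callbacks from an accessor for the corresponding hist_entry field.
 */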
503 #define __HPP_COLOR_PERCENT_FN(_type, _field, _fmttype) \
504 static u64 he_get_##_field(struct hist_entry *he) \
505 { \
506 return he->stat._field; \
507 } \
508 \
509 static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \
510 struct perf_hpp *hpp, struct hist_entry *he) \
511 { \
512 return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
513 hpp_color_scnprintf, _fmttype); \
514 }
515
516 #define __HPP_ENTRY_PERCENT_FN(_type, _field, _fmttype) \
517 static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
518 struct perf_hpp *hpp, struct hist_entry *he) \
519 { \
520 return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
521 hpp_entry_scnprintf, _fmttype); \
522 }
523
524 #define __HPP_SORT_FN(_type, _field) \
525 static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
526 struct hist_entry *a, struct hist_entry *b) \
527 { \
528 return __hpp__sort(a, b, he_get_##_field); \
529 }
530
531 #define __HPP_COLOR_ACC_PERCENT_FN(_type, _field, _fmttype) \
532 static u64 he_get_acc_##_field(struct hist_entry *he) \
533 { \
534 return he->stat_acc->_field; \
535 } \
536 \
537 static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \
538 struct perf_hpp *hpp, struct hist_entry *he) \
539 { \
540 return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
541 hpp_color_scnprintf, _fmttype); \
542 }
543
544 #define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field, _fmttype) \
545 static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
546 struct perf_hpp *hpp, struct hist_entry *he) \
547 { \
548 return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
549 hpp_entry_scnprintf, _fmttype); \
550 }
551
552 #define __HPP_SORT_ACC_FN(_type, _field) \
553 static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
554 struct hist_entry *a, struct hist_entry *b) \
555 { \
556 return __hpp__sort_acc(a, b, he_get_acc_##_field); \
557 }
558
559 #define __HPP_ENTRY_RAW_FN(_type, _field) \
560 static u64 he_get_raw_##_field(struct hist_entry *he) \
561 { \
562 return he->stat._field; \
563 } \
564 \
565 static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
566 struct perf_hpp *hpp, struct hist_entry *he) \
567 { \
568 return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \
569 hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__RAW); \
570 }
571
572 #define __HPP_SORT_RAW_FN(_type, _field) \
573 static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
574 struct hist_entry *a, struct hist_entry *b) \
575 { \
576 return __hpp__sort(a, b, he_get_raw_##_field); \
577 }
578
579 #define __HPP_ENTRY_AVERAGE_FN(_type, _field) \
580 static u64 he_get_##_field(struct hist_entry *he) \
581 { \
582 return he->stat._field; \
583 } \
584 \
585 static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
586 struct perf_hpp *hpp, struct hist_entry *he) \
587 { \
588 return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.1f", \
589 hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__AVERAGE); \
590 }
591
592 #define __HPP_SORT_AVERAGE_FN(_type, _field) \
593 static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
594 struct hist_entry *a, struct hist_entry *b) \
595 { \
596 return __hpp__sort(a, b, he_get_##_field); \
597 }
598
599 #define __HPP_COLOR_MEM_STAT_FN(_name, _type) \
600 static int hpp__color_mem_stat_##_name(struct perf_hpp_fmt *fmt, \
601 struct perf_hpp *hpp, \
602 struct hist_entry *he) \
603 { \
604 return hpp__fmt_mem_stat(fmt, hpp, he, PERF_MEM_STAT_##_type, \
605 " %5.1f%%", hpp_color_scnprintf); \
606 }
607
608 #define __HPP_ENTRY_MEM_STAT_FN(_name, _type) \
609 static int hpp__entry_mem_stat_##_name(struct perf_hpp_fmt *fmt, \
610 struct perf_hpp *hpp, \
611 struct hist_entry *he) \
612 { \
613 return hpp__fmt_mem_stat(fmt, hpp, he, PERF_MEM_STAT_##_type, \
614 " %5.1f%%", hpp_entry_scnprintf); \
615 }
616
617 #define HPP_PERCENT_FNS(_type, _field, _fmttype) \
618 __HPP_COLOR_PERCENT_FN(_type, _field, _fmttype) \
619 __HPP_ENTRY_PERCENT_FN(_type, _field, _fmttype) \
620 __HPP_SORT_FN(_type, _field)
621
622 #define HPP_PERCENT_ACC_FNS(_type, _field, _fmttype) \
623 __HPP_COLOR_ACC_PERCENT_FN(_type, _field, _fmttype) \
624 __HPP_ENTRY_ACC_PERCENT_FN(_type, _field, _fmttype) \
625 __HPP_SORT_ACC_FN(_type, _field)
626
627 #define HPP_RAW_FNS(_type, _field) \
628 __HPP_ENTRY_RAW_FN(_type, _field) \
629 __HPP_SORT_RAW_FN(_type, _field)
630
631 #define HPP_AVERAGE_FNS(_type, _field) \
632 __HPP_ENTRY_AVERAGE_FN(_type, _field) \
633 __HPP_SORT_AVERAGE_FN(_type, _field)
634
635 #define HPP_MEM_STAT_FNS(_name, _type) \
636 __HPP_COLOR_MEM_STAT_FN(_name, _type) \
637 __HPP_ENTRY_MEM_STAT_FN(_name, _type)
638
639 HPP_PERCENT_FNS(overhead, period, PERF_HPP_FMT_TYPE__PERCENT)
640 HPP_PERCENT_FNS(latency, latency, PERF_HPP_FMT_TYPE__LATENCY)
641 HPP_PERCENT_FNS(overhead_sys, period_sys, PERF_HPP_FMT_TYPE__PERCENT)
642 HPP_PERCENT_FNS(overhead_us, period_us, PERF_HPP_FMT_TYPE__PERCENT)
643 HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys, PERF_HPP_FMT_TYPE__PERCENT)
644 HPP_PERCENT_FNS(overhead_guest_us, period_guest_us, PERF_HPP_FMT_TYPE__PERCENT)
645 HPP_PERCENT_ACC_FNS(overhead_acc, period, PERF_HPP_FMT_TYPE__PERCENT)
646 HPP_PERCENT_ACC_FNS(latency_acc, latency, PERF_HPP_FMT_TYPE__LATENCY)
647
648 HPP_RAW_FNS(samples, nr_events)
649 HPP_RAW_FNS(period, period)
650
651 HPP_AVERAGE_FNS(weight1, weight1)
652 HPP_AVERAGE_FNS(weight2, weight2)
653 HPP_AVERAGE_FNS(weight3, weight3)
654
655 HPP_MEM_STAT_FNS(op, OP)
656 HPP_MEM_STAT_FNS(cache, CACHE)
657 HPP_MEM_STAT_FNS(memory, MEMORY)
658 HPP_MEM_STAT_FNS(snoop, SNOOP)
659 HPP_MEM_STAT_FNS(dtlb, DTLB)
660
661 static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
662 struct hist_entry *a __maybe_unused,
663 struct hist_entry *b __maybe_unused)
664 {
665 return 0;
666 }
667
668 static bool perf_hpp__is_mem_stat_entry(struct perf_hpp_fmt *fmt)
669 {
670 return fmt->sort == hpp__sort_mem_stat;
671 }
672
673 static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
674 {
675 return a->header == hpp__header_fn;
676 }
677
678 static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
679 {
680 if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
681 return false;
682
683 return a->idx == b->idx;
684 }
685
686 static bool hpp__equal_mem_stat(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
687 {
688 if (!perf_hpp__is_mem_stat_entry(a) || !perf_hpp__is_mem_stat_entry(b))
689 return false;
690
691 return a->entry == b->entry;
692 }
693
694 #define HPP__COLOR_PRINT_FNS(_name, _fn, _idx) \
695 { \
696 .name = _name, \
697 .header = hpp__header_fn, \
698 .width = hpp__width_fn, \
699 .color = hpp__color_ ## _fn, \
700 .entry = hpp__entry_ ## _fn, \
701 .cmp = hpp__nop_cmp, \
702 .collapse = hpp__nop_cmp, \
703 .sort = hpp__sort_ ## _fn, \
704 .idx = PERF_HPP__ ## _idx, \
705 .equal = hpp__equal, \
706 }
707
708 #define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx) \
709 { \
710 .name = _name, \
711 .header = hpp__header_fn, \
712 .width = hpp__width_fn, \
713 .color = hpp__color_ ## _fn, \
714 .entry = hpp__entry_ ## _fn, \
715 .cmp = hpp__nop_cmp, \
716 .collapse = hpp__nop_cmp, \
717 .sort = hpp__sort_ ## _fn, \
718 .idx = PERF_HPP__ ## _idx, \
719 .equal = hpp__equal, \
720 }
721
722 #define HPP__PRINT_FNS(_name, _fn, _idx) \
723 { \
724 .name = _name, \
725 .header = hpp__header_fn, \
726 .width = hpp__width_fn, \
727 .entry = hpp__entry_ ## _fn, \
728 .cmp = hpp__nop_cmp, \
729 .collapse = hpp__nop_cmp, \
730 .sort = hpp__sort_ ## _fn, \
731 .idx = PERF_HPP__ ## _idx, \
732 .equal = hpp__equal, \
733 }
734
735 #define HPP__MEM_STAT_PRINT_FNS(_name, _fn, _type) \
736 { \
737 .name = _name, \
738 .header = hpp__header_mem_stat_fn, \
739 .width = hpp__width_fn, \
740 .color = hpp__color_mem_stat_ ## _fn, \
741 .entry = hpp__entry_mem_stat_ ## _fn, \
742 .cmp = hpp__nop_cmp, \
743 .collapse = hpp__nop_cmp, \
744 .sort = hpp__sort_mem_stat, \
745 .idx = PERF_HPP__MEM_STAT_ ## _type, \
746 .equal = hpp__equal_mem_stat, \
747 }
748
749 struct perf_hpp_fmt perf_hpp__format[] = {
750 HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
751 HPP__COLOR_PRINT_FNS("Latency", latency, LATENCY),
752 HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
753 HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
754 HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
755 HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
756 HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
757 HPP__COLOR_ACC_PRINT_FNS("Latency", latency_acc, LATENCY_ACC),
758 HPP__PRINT_FNS("Samples", samples, SAMPLES),
759 HPP__PRINT_FNS("Period", period, PERIOD),
760 HPP__PRINT_FNS("Weight1", weight1, WEIGHT1),
761 HPP__PRINT_FNS("Weight2", weight2, WEIGHT2),
762 HPP__PRINT_FNS("Weight3", weight3, WEIGHT3),
763 HPP__MEM_STAT_PRINT_FNS("Mem Op", op, OP),
764 HPP__MEM_STAT_PRINT_FNS("Cache", cache, CACHE),
765 HPP__MEM_STAT_PRINT_FNS("Memory", memory, MEMORY),
766 HPP__MEM_STAT_PRINT_FNS("Snoop", snoop, SNOOP),
767 HPP__MEM_STAT_PRINT_FNS("D-TLB", dtlb, DTLB),
768 };
769
770 struct perf_hpp_list perf_hpp_list = {
771 .fields = LIST_HEAD_INIT(perf_hpp_list.fields),
772 .sorts = LIST_HEAD_INIT(perf_hpp_list.sorts),
773 .nr_header_lines = 1,
774 };
775
776 #undef HPP__COLOR_PRINT_FNS
777 #undef HPP__COLOR_ACC_PRINT_FNS
778 #undef HPP__PRINT_FNS
779 #undef HPP__MEM_STAT_PRINT_FNS
780
781 #undef HPP_PERCENT_FNS
782 #undef HPP_PERCENT_ACC_FNS
783 #undef HPP_RAW_FNS
784 #undef HPP_AVERAGE_FNS
785 #undef HPP_MEM_STAT_FNS
786
787 #undef __HPP_HEADER_FN
788 #undef __HPP_WIDTH_FN
789 #undef __HPP_COLOR_PERCENT_FN
790 #undef __HPP_ENTRY_PERCENT_FN
791 #undef __HPP_COLOR_ACC_PERCENT_FN
792 #undef __HPP_ENTRY_ACC_PERCENT_FN
793 #undef __HPP_ENTRY_RAW_FN
794 #undef __HPP_ENTRY_AVERAGE_FN
795 #undef __HPP_COLOR_MEM_STAT_FN
796 #undef __HPP_ENTRY_MEM_STAT_FN
797
798 #undef __HPP_SORT_FN
799 #undef __HPP_SORT_ACC_FN
800 #undef __HPP_SORT_RAW_FN
801 #undef __HPP_SORT_AVERAGE_FN
802
803 static void fmt_free(struct perf_hpp_fmt *fmt)
804 {
805 /*
806 * At this point fmt should be completely
807 * unhooked; if not, it's a bug.
808 */
809 BUG_ON(!list_empty(&fmt->list));
810 BUG_ON(!list_empty(&fmt->sort_list));
811
812 if (fmt->free)
813 fmt->free(fmt);
814 }
815
816 static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
817 {
818 return a->equal && a->equal(a, b);
819 }
820
821 void perf_hpp__init(void)
822 {
823 int i;
824
825 for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
826 struct perf_hpp_fmt *fmt = &perf_hpp__format[i];
827
828 INIT_LIST_HEAD(&fmt->list);
829
830 /* sort_list may be linked by setup_sorting() */
831 if (fmt->sort_list.next == NULL)
832 INIT_LIST_HEAD(&fmt->sort_list);
833 }
834
835 /*
836 * If the user specified a field order, no need to set up default fields.
837 */
838 if (is_strict_order(field_order))
839 return;
840
841 /*
842 * Overhead and latency columns are added in setup_overhead(),
843 * so they are added implicitly here only if they were added
844 * by setup_overhead() before (have was_taken flag set).
845 * This is required because setup_overhead() has more complex
846 * logic, in particular it does not add "overhead" if the user
847 * specified "latency" in the sort order, and vice versa.
848 */
849 if (symbol_conf.cumulate_callchain) {
850 /*
851 * Adding fields is idempotent, so we add the latency
852 * column twice to get the desired order with simpler logic.
853 */
854 if (symbol_conf.prefer_latency)
855 hpp_dimension__add_output(PERF_HPP__LATENCY_ACC, true);
856 hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC, true);
857 if (symbol_conf.enable_latency)
858 hpp_dimension__add_output(PERF_HPP__LATENCY_ACC, true);
859 perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
860 }
861
862 if (symbol_conf.prefer_latency)
863 hpp_dimension__add_output(PERF_HPP__LATENCY, true);
864 hpp_dimension__add_output(PERF_HPP__OVERHEAD, true);
865 if (symbol_conf.enable_latency)
866 hpp_dimension__add_output(PERF_HPP__LATENCY, true);
867
868 if (symbol_conf.show_cpu_utilization) {
869 hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS, false);
870 hpp_dimension__add_output(PERF_HPP__OVERHEAD_US, false);
871
872 if (perf_guest) {
873 hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS, false);
874 hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US, false);
875 }
876 }
877
878 if (symbol_conf.show_nr_samples)
879 hpp_dimension__add_output(PERF_HPP__SAMPLES, false);
880
881 if (symbol_conf.show_total_period)
882 hpp_dimension__add_output(PERF_HPP__PERIOD, false);
883 }
884
885 void perf_hpp_list__column_register(struct perf_hpp_list *list,
886 struct perf_hpp_fmt *format)
887 {
888 list_add_tail(&format->list, &list->fields);
889 }
890
891 void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
892 struct perf_hpp_fmt *format)
893 {
894 list_add_tail(&format->sort_list, &list->sorts);
895 }
896
897 void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
898 struct perf_hpp_fmt *format)
899 {
900 list_add(&format->sort_list, &list->sorts);
901 }
902
903 static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
904 {
905 list_del_init(&format->list);
906 list_del_init(&format->sort_list);
907 fmt_free(format);
908 }
909
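/*
 * Remove the accumulated (children) columns and restore the "Overhead"
 * column name when callchain cumulation is not used.
 */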
910 void perf_hpp__cancel_cumulate(struct evlist *evlist)
911 {
912 struct perf_hpp_fmt *fmt, *acc, *ovh, *acc_lat, *tmp;
913 struct evsel *evsel;
914
915 if (is_strict_order(field_order))
916 return;
917
918 ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
919 acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];
920 acc_lat = &perf_hpp__format[PERF_HPP__LATENCY_ACC];
921
922 perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
923 if (fmt_equal(acc, fmt) || fmt_equal(acc_lat, fmt)) {
924 perf_hpp__column_unregister(fmt);
925 continue;
926 }
927
928 if (fmt_equal(ovh, fmt))
929 fmt->name = "Overhead";
930 }
931
932 evlist__for_each_entry(evlist, evsel) {
933 struct hists *hists = evsel__hists(evsel);
934 struct perf_hpp_list_node *node;
935
936 list_for_each_entry(node, &hists->hpp_formats, list) {
937 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, tmp) {
938 if (fmt_equal(acc, fmt) || fmt_equal(acc_lat, fmt)) {
939 perf_hpp__column_unregister(fmt);
940 continue;
941 }
942
943 if (fmt_equal(ovh, fmt))
944 fmt->name = "Overhead";
945 }
946 }
947 }
948 }
949
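/*
 * Remove the latency columns unless the user explicitly asked for
 * latency in the sort order.
 */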
950 void perf_hpp__cancel_latency(struct evlist *evlist)
951 {
952 struct perf_hpp_fmt *fmt, *lat, *acc, *tmp;
953 struct evsel *evsel;
954
955 if (is_strict_order(field_order))
956 return;
957 if (sort_order && strstr(sort_order, "latency"))
958 return;
959
960 lat = &perf_hpp__format[PERF_HPP__LATENCY];
961 acc = &perf_hpp__format[PERF_HPP__LATENCY_ACC];
962
963 perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
964 if (fmt_equal(lat, fmt) || fmt_equal(acc, fmt))
965 perf_hpp__column_unregister(fmt);
966 }
967
968 evlist__for_each_entry(evlist, evsel) {
969 struct hists *hists = evsel__hists(evsel);
970 struct perf_hpp_list_node *node;
971
972 list_for_each_entry(node, &hists->hpp_formats, list) {
973 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, tmp) {
974 if (fmt_equal(lat, fmt) || fmt_equal(acc, fmt))
975 perf_hpp__column_unregister(fmt);
976 }
977 }
978 }
979 }
980
981 void perf_hpp__setup_output_field(struct perf_hpp_list *list)
982 {
983 struct perf_hpp_fmt *fmt;
984
985 /* append sort keys to output field */
986 perf_hpp_list__for_each_sort_list(list, fmt) {
987 struct perf_hpp_fmt *pos;
988
989 /* skip sort-only fields ("sort_compute" in perf diff) */
990 if (!fmt->entry && !fmt->color)
991 continue;
992
993 perf_hpp_list__for_each_format(list, pos) {
994 if (fmt_equal(fmt, pos))
995 goto next;
996 }
997
998 perf_hpp__column_register(fmt);
999 next:
1000 continue;
1001 }
1002 }
1003
1004 void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
1005 {
1006 struct perf_hpp_fmt *fmt;
1007
1008 /* append output fields to sort keys */
1009 perf_hpp_list__for_each_format(list, fmt) {
1010 struct perf_hpp_fmt *pos;
1011
1012 perf_hpp_list__for_each_sort_list(list, pos) {
1013 if (fmt_equal(fmt, pos))
1014 goto next;
1015 }
1016
1017 perf_hpp__register_sort_field(fmt);
1018 next:
1019 continue;
1020 }
1021 }
1022
1023
1024 void perf_hpp__reset_output_field(struct perf_hpp_list *list)
1025 {
1026 struct perf_hpp_fmt *fmt, *tmp;
1027
1028 /* reset output fields */
1029 perf_hpp_list__for_each_format_safe(list, fmt, tmp)
1030 perf_hpp__column_unregister(fmt);
1031
1032 /* reset sort keys */
1033 perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp)
1034 perf_hpp__column_unregister(fmt);
1035 }
1036
1037 /*
1038 * See hists__fprintf to match the column widths
1039 */
1040 unsigned int hists__sort_list_width(struct hists *hists)
1041 {
1042 struct perf_hpp_fmt *fmt;
1043 int ret = 0;
1044 bool first = true;
1045 struct perf_hpp dummy_hpp;
1046
1047 hists__for_each_format(hists, fmt) {
1048 if (perf_hpp__should_skip(fmt, hists))
1049 continue;
1050
1051 if (first)
1052 first = false;
1053 else
1054 ret += 2;
1055
1056 ret += fmt->width(fmt, &dummy_hpp, hists);
1057 }
1058
1059 if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
1060 ret += 3 + BITS_PER_LONG / 4;
1061
1062 return ret;
1063 }
1064
1065 unsigned int hists__overhead_width(struct hists *hists)
1066 {
1067 struct perf_hpp_fmt *fmt;
1068 int ret = 0;
1069 bool first = true;
1070 struct perf_hpp dummy_hpp;
1071
1072 hists__for_each_format(hists, fmt) {
1073 if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1074 break;
1075
1076 if (first)
1077 first = false;
1078 else
1079 ret += 2;
1080
1081 ret += fmt->width(fmt, &dummy_hpp, hists);
1082 }
1083
1084 return ret;
1085 }
1086
1087 void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1088 {
1089 if (perf_hpp__is_sort_entry(fmt))
1090 return perf_hpp__reset_sort_width(fmt, hists);
1091
1092 if (perf_hpp__is_dynamic_entry(fmt))
1093 return;
1094
1095 BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);
1096
1097 switch (fmt->idx) {
1098 case PERF_HPP__OVERHEAD:
1099 case PERF_HPP__LATENCY:
1100 case PERF_HPP__OVERHEAD_SYS:
1101 case PERF_HPP__OVERHEAD_US:
1102 case PERF_HPP__OVERHEAD_ACC:
1103 fmt->len = 8;
1104 break;
1105
1106 case PERF_HPP__OVERHEAD_GUEST_SYS:
1107 case PERF_HPP__OVERHEAD_GUEST_US:
1108 fmt->len = 9;
1109 break;
1110
1111 case PERF_HPP__SAMPLES:
1112 case PERF_HPP__PERIOD:
1113 fmt->len = 12;
1114 break;
1115
1116 case PERF_HPP__WEIGHT1:
1117 case PERF_HPP__WEIGHT2:
1118 case PERF_HPP__WEIGHT3:
1119 fmt->len = 8;
1120 break;
1121
1122 case PERF_HPP__MEM_STAT_OP:
1123 case PERF_HPP__MEM_STAT_CACHE:
1124 case PERF_HPP__MEM_STAT_MEMORY:
1125 case PERF_HPP__MEM_STAT_SNOOP:
1126 case PERF_HPP__MEM_STAT_DTLB:
1127 fmt->len = MEM_STAT_LEN * MEM_STAT_PRINT_LEN;
1128 break;
1129
1130 default:
1131 break;
1132 }
1133 }
1134
1135 void hists__reset_column_width(struct hists *hists)
1136 {
1137 struct perf_hpp_fmt *fmt;
1138 struct perf_hpp_list_node *node;
1139
1140 hists__for_each_format(hists, fmt)
1141 perf_hpp__reset_width(fmt, hists);
1142
1143 /* hierarchy entries have their own hpp list */
1144 list_for_each_entry(node, &hists->hpp_formats, list) {
1145 perf_hpp_list__for_each_format(&node->hpp, fmt)
1146 perf_hpp__reset_width(fmt, hists);
1147 }
1148 }
1149
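/*
 * Parse a comma-separated list of column widths (e.g. "8,8,12") and
 * apply them to the output fields in order.
 */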
1150 void perf_hpp__set_user_width(const char *width_list_str)
1151 {
1152 struct perf_hpp_fmt *fmt;
1153 const char *ptr = width_list_str;
1154
1155 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
1156 char *p;
1157
1158 int len = strtol(ptr, &p, 10);
1159 fmt->user_len = len;
1160
1161 if (*p == ',')
1162 ptr = p + 1;
1163 else
1164 break;
1165 }
1166 }
1167
1168 static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
1169 {
1170 struct perf_hpp_list_node *node = NULL;
1171 struct perf_hpp_fmt *fmt_copy;
1172 bool found = false;
1173 bool skip = perf_hpp__should_skip(fmt, hists);
1174
1175 list_for_each_entry(node, &hists->hpp_formats, list) {
1176 if (node->level == fmt->level) {
1177 found = true;
1178 break;
1179 }
1180 }
1181
1182 if (!found) {
1183 node = malloc(sizeof(*node));
1184 if (node == NULL)
1185 return -1;
1186
1187 node->skip = skip;
1188 node->level = fmt->level;
1189 perf_hpp_list__init(&node->hpp);
1190
1191 hists->nr_hpp_node++;
1192 list_add_tail(&node->list, &hists->hpp_formats);
1193 }
1194
1195 fmt_copy = perf_hpp_fmt__dup(fmt);
1196 if (fmt_copy == NULL)
1197 return -1;
1198
1199 if (!skip)
1200 node->skip = false;
1201
1202 list_add_tail(&fmt_copy->list, &node->hpp.fields);
1203 list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);
1204
1205 return 0;
1206 }
1207
1208 int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
1209 struct evlist *evlist)
1210 {
1211 struct evsel *evsel;
1212 struct perf_hpp_fmt *fmt;
1213 struct hists *hists;
1214 int ret;
1215
1216 if (!symbol_conf.report_hierarchy)
1217 return 0;
1218
1219 evlist__for_each_entry(evlist, evsel) {
1220 hists = evsel__hists(evsel);
1221
1222 perf_hpp_list__for_each_sort_list(list, fmt) {
1223 if (perf_hpp__is_dynamic_entry(fmt) &&
1224 !perf_hpp__defined_dynamic_entry(fmt, hists))
1225 continue;
1226
1227 ret = add_hierarchy_fmt(hists, fmt);
1228 if (ret < 0)
1229 return ret;
1230 }
1231 }
1232
1233 return 0;
1234 }
1235
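/*
 * Collect the mem_stat types used by the output fields and allocate
 * the per-hists type and total arrays.  Mem_stat columns need a
 * second header line for their sub-column names.
 */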
1236 int perf_hpp__alloc_mem_stats(struct perf_hpp_list *list, struct evlist *evlist)
1237 {
1238 struct perf_hpp_fmt *fmt;
1239 struct evsel *evsel;
1240 enum mem_stat_type mst[16];
1241 unsigned nr_mem_stats = 0;
1242
1243 perf_hpp_list__for_each_format(list, fmt) {
1244 if (!perf_hpp__is_mem_stat_entry(fmt))
1245 continue;
1246
1247 assert(nr_mem_stats < ARRAY_SIZE(mst));
1248 mst[nr_mem_stats++] = hpp__mem_stat_type(fmt);
1249 }
1250
1251 if (nr_mem_stats == 0)
1252 return 0;
1253
1254 list->nr_header_lines = 2;
1255
1256 evlist__for_each_entry(evlist, evsel) {
1257 struct hists *hists = evsel__hists(evsel);
1258
1259 hists->mem_stat_types = calloc(nr_mem_stats,
1260 sizeof(*hists->mem_stat_types));
1261 if (hists->mem_stat_types == NULL)
1262 return -ENOMEM;
1263
1264 hists->mem_stat_total = calloc(nr_mem_stats,
1265 sizeof(*hists->mem_stat_total));
1266 if (hists->mem_stat_total == NULL)
1267 return -ENOMEM;
1268
1269 memcpy(hists->mem_stat_types, mst, nr_mem_stats * sizeof(*mst));
1270 hists->nr_mem_stats = nr_mem_stats;
1271 }
1272 return 0;
1273 }
1274