1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4 *
5 * Parts came from builtin-annotate.c, see those files for further
6 * copyright notes.
7 */
8
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <libgen.h>
12 #include <stdlib.h>
13 #include "util.h" // hex_width()
14 #include "ui/ui.h"
15 #include "sort.h"
16 #include "build-id.h"
17 #include "color.h"
18 #include "config.h"
19 #include "disasm.h"
20 #include "dso.h"
21 #include "env.h"
22 #include "map.h"
23 #include "maps.h"
24 #include "symbol.h"
25 #include "srcline.h"
26 #include "units.h"
27 #include "debug.h"
28 #include "debuginfo.h"
29 #include "annotate.h"
30 #include "annotate-data.h"
31 #include "evsel.h"
32 #include "evlist.h"
33 #include "bpf-event.h"
34 #include "bpf-utils.h"
35 #include "block-range.h"
36 #include "string2.h"
37 #include "dwarf-regs.h"
38 #include "util/event.h"
39 #include "util/sharded_mutex.h"
40 #include "arch/common.h"
41 #include "namespaces.h"
42 #include "thread.h"
43 #include "hashmap.h"
44 #include "strbuf.h"
45 #include <regex.h>
46 #include <linux/bitops.h>
47 #include <linux/kernel.h>
48 #include <linux/string.h>
49 #include <linux/zalloc.h>
50 #include <subcmd/parse-options.h>
51 #include <subcmd/run-command.h>
52 #include <math.h>
53
54 /* FIXME: For the HE_COLORSET */
55 #include "ui/browser.h"
56
57 /*
58 * FIXME: Using the same values as slang.h,
59 * but that header may not be available everywhere
60 */
61 #define LARROW_CHAR ((unsigned char)',')
62 #define RARROW_CHAR ((unsigned char)'+')
63 #define DARROW_CHAR ((unsigned char)'.')
64 #define UARROW_CHAR ((unsigned char)'-')
65
66 #include <linux/ctype.h>
67
68 /* global annotation options */
69 struct annotation_options annotate_opts;
70
71 /* Data type collection debug statistics */
72 struct annotated_data_stat ann_data_stat;
73 LIST_HEAD(ann_insn_stat);
74
75 /* Pseudo data types */
76 struct annotated_data_type stackop_type = {
77 .self = {
78 .type_name = (char *)"(stack operation)",
79 .children = LIST_HEAD_INIT(stackop_type.self.children),
80 },
81 };
82
83 struct annotated_data_type canary_type = {
84 .self = {
85 .type_name = (char *)"(stack canary)",
86 .children = LIST_HEAD_INIT(canary_type.self.children),
87 },
88 };
89
90 #define NO_TYPE ((struct annotated_data_type *)-1UL)
91
92 /* symbol histogram: key = offset << 16 | evsel->core.idx */
/*
 * Hash a sample key (offset << 16 | evsel index) by folding both halves,
 * so that the offset and the event index each contribute to the bucket.
 */
static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
{
	size_t offset = key >> 16;
	size_t evidx = key & 0xffff;

	return offset + evidx;
}
97
/* Keys are plain integers, so equality is a direct comparison. */
static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return !(key1 ^ key2);
}
102
annotated_source__new(void)103 static struct annotated_source *annotated_source__new(void)
104 {
105 struct annotated_source *src = zalloc(sizeof(*src));
106
107 if (src != NULL)
108 INIT_LIST_HEAD(&src->source);
109
110 return src;
111 }
112
annotated_source__delete(struct annotated_source * src)113 static __maybe_unused void annotated_source__delete(struct annotated_source *src)
114 {
115 struct hashmap_entry *cur;
116 size_t bkt;
117
118 if (src == NULL)
119 return;
120
121 if (src->samples) {
122 hashmap__for_each_entry(src->samples, cur, bkt)
123 zfree(&cur->pvalue);
124 hashmap__free(src->samples);
125 }
126 zfree(&src->histograms);
127 free(src);
128 }
129
annotated_source__alloc_histograms(struct annotated_source * src,int nr_hists)130 static int annotated_source__alloc_histograms(struct annotated_source *src,
131 int nr_hists)
132 {
133 src->nr_histograms = nr_hists;
134 src->histograms = calloc(nr_hists, sizeof(*src->histograms));
135
136 if (src->histograms == NULL)
137 return -1;
138
139 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
140 if (src->samples == NULL)
141 zfree(&src->histograms);
142
143 return src->histograms ? 0 : -1;
144 }
145
/* Reset all per-event sample histograms and branch cycle data for @sym. */
void symbol__annotate_zero_histograms(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);

	annotation__lock(notes);
	if (notes->src != NULL) {
		/* Zero every per-event histogram and drop collected samples. */
		memset(notes->src->histograms, 0,
		       notes->src->nr_histograms * sizeof(*notes->src->histograms));
		hashmap__clear(notes->src->samples);
	}
	if (notes->branch && notes->branch->cycles_hist) {
		/* One cyc_hist slot per byte of the symbol. */
		memset(notes->branch->cycles_hist, 0,
		       symbol__size(sym) * sizeof(struct cyc_hist));
	}
	annotation__unlock(notes);
}
162
/*
 * Fold one sampled basic block (ending at instruction @offset, taking
 * @cycles) into its cycle histogram slot.  @have_start says whether
 * @start (the block's first offset) is known.  Always returns 0.
 */
static int __symbol__account_cycles(struct cyc_hist *ch,
				    u64 start,
				    unsigned offset, unsigned cycles,
				    unsigned have_start)
{
	/*
	 * For now we can only account one basic block per
	 * final jump. But multiple could be overlapping.
	 * Always account the longest one. So when
	 * a shorter one has been already seen throw it away.
	 *
	 * We separately always account the full cycles.
	 */
	ch[offset].num_aggr++;
	ch[offset].cycles_aggr += cycles;

	if (cycles > ch[offset].cycles_max)
		ch[offset].cycles_max = cycles;

	/* Track the minimum, ignoring zero-cycle samples once one exists. */
	if (ch[offset].cycles_min) {
		if (cycles && cycles < ch[offset].cycles_min)
			ch[offset].cycles_min = cycles;
	} else
		ch[offset].cycles_min = cycles;

	/* A block with a known start always beats one without. */
	if (!have_start && ch[offset].have_start)
		return 0;
	if (ch[offset].num) {
		if (have_start && (!ch[offset].have_start ||
				   ch[offset].start > start)) {
			/* Longer block (smaller start offset): restart accounting. */
			ch[offset].have_start = 0;
			ch[offset].cycles = 0;
			ch[offset].num = 0;
			/* Saturating counter of overlap resets, used to hide noisy data. */
			if (ch[offset].reset < 0xffff)
				ch[offset].reset++;
		} else if (have_start &&
			   ch[offset].start < start)
			/* Shorter than the block already tracked: discard. */
			return 0;
	}

	/* Keep the first NUM_SPARKS raw cycle values for the sparkline. */
	if (ch[offset].num < NUM_SPARKS)
		ch[offset].cycles_spark[ch[offset].num] = cycles;

	ch[offset].have_start = have_start;
	ch[offset].start = start;
	ch[offset].cycles += cycles;
	ch[offset].num++;
	return 0;
}
212
/*
 * Record one sample at @addr for @evsel in @sym's histogram and in the
 * per-(offset, event) sample hashmap.  Returns 0 on success, -ERANGE if
 * @addr falls outside the symbol, -ENOMEM on allocation failure.
 *
 * Fix: free the freshly allocated entry when hashmap__add() fails —
 * the original leaked it, as the entry was never published to the map.
 */
static int __symbol__inc_addr_samples(struct map_symbol *ms,
		struct annotated_source *src, struct evsel *evsel, u64 addr,
		struct perf_sample *sample)
{
	struct symbol *sym = ms->sym;
	long hash_key;
	u64 offset;
	struct sym_hist *h;
	struct sym_hist_entry *entry;

	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));

	/* Allow addr == sym->end only for zero-sized symbols. */
	if ((addr < sym->start || addr >= sym->end) &&
	    (addr != sym->end || sym->start != sym->end)) {
		pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
			 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
		return -ERANGE;
	}

	offset = addr - sym->start;
	h = annotated_source__histogram(src, evsel);
	if (h == NULL) {
		pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
			 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
		return -ENOMEM;
	}

	/* Key combines instruction offset and event index; see sym_hist_hash(). */
	hash_key = offset << 16 | evsel->core.idx;
	if (!hashmap__find(src->samples, hash_key, &entry)) {
		entry = zalloc(sizeof(*entry));
		if (entry == NULL)
			return -ENOMEM;

		if (hashmap__add(src->samples, hash_key, entry) < 0) {
			/* Never published to the map: release it here. */
			free(entry);
			return -ENOMEM;
		}
	}

	h->nr_samples++;
	h->period += sample->period;
	entry->nr_samples++;
	entry->period += sample->period;

	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
		  ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
		  sym->start, sym->name, addr, addr - sym->start, evsel->core.idx,
		  entry->nr_samples, entry->period);
	return 0;
}
261
annotation__get_branch(struct annotation * notes)262 struct annotated_branch *annotation__get_branch(struct annotation *notes)
263 {
264 if (notes == NULL)
265 return NULL;
266
267 if (notes->branch == NULL)
268 notes->branch = zalloc(sizeof(*notes->branch));
269
270 return notes->branch;
271 }
272
symbol__find_branch_hist(struct symbol * sym,unsigned int br_cntr_nr)273 static struct annotated_branch *symbol__find_branch_hist(struct symbol *sym,
274 unsigned int br_cntr_nr)
275 {
276 struct annotation *notes = symbol__annotation(sym);
277 struct annotated_branch *branch;
278 const size_t size = symbol__size(sym);
279
280 branch = annotation__get_branch(notes);
281 if (branch == NULL)
282 return NULL;
283
284 if (branch->cycles_hist == NULL) {
285 branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
286 if (!branch->cycles_hist)
287 return NULL;
288 }
289
290 if (br_cntr_nr && branch->br_cntr == NULL) {
291 branch->br_cntr = calloc(br_cntr_nr * size, sizeof(u64));
292 if (!branch->br_cntr)
293 return NULL;
294 }
295
296 return branch;
297 }
298
symbol__hists(struct symbol * sym,int nr_hists)299 struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
300 {
301 struct annotation *notes = symbol__annotation(sym);
302
303 if (notes->src == NULL) {
304 notes->src = annotated_source__new();
305 if (notes->src == NULL)
306 return NULL;
307 goto alloc_histograms;
308 }
309
310 if (notes->src->histograms == NULL) {
311 alloc_histograms:
312 annotated_source__alloc_histograms(notes->src, nr_hists);
313 }
314
315 return notes->src;
316 }
317
/*
 * Record a sample at @addr for @evsel, lazily creating the histograms.
 * Silently succeeds when the symbol or the histograms are unavailable.
 */
static int symbol__inc_addr_samples(struct map_symbol *ms,
				    struct evsel *evsel, u64 addr,
				    struct perf_sample *sample)
{
	struct annotated_source *src;

	if (ms->sym == NULL)
		return 0;

	src = symbol__hists(ms->sym, evsel->evlist->core.nr_entries);
	if (src == NULL)
		return 0;

	return __symbol__inc_addr_samples(ms, src, evsel, addr, sample);
}
330
/*
 * Accumulate the branch counter values packed in @br_cntr into the
 * per-offset counter array of @branch.  Each event in the group owns a
 * @width-bit field of @br_cntr, placed at the index its group leader
 * assigned.  Returns 0 (also when there is nothing to account).
 */
static int symbol__account_br_cntr(struct annotated_branch *branch,
				   struct evsel *evsel,
				   unsigned offset,
				   u64 br_cntr)
{
	unsigned int br_cntr_nr = evsel__leader(evsel)->br_cntr_nr;
	unsigned int base = evsel__leader(evsel)->br_cntr_idx;
	/* Counters are laid out as nr_br_cntr consecutive u64s per offset. */
	unsigned int off = offset * evsel->evlist->nr_br_cntr;
	u64 *branch_br_cntr = branch->br_cntr;
	unsigned int i, mask, width;

	if (!br_cntr || !branch_br_cntr)
		return 0;

	/* assumes width is small enough that (1L << width) - 1 fits in mask — TODO confirm */
	perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width);
	mask = (1L << width) - 1;
	for (i = 0; i < br_cntr_nr; i++) {
		u64 cntr = (br_cntr >> i * width) & mask;

		branch_br_cntr[off + i + base] += cntr;
		/* An all-ones field means the hardware counter saturated. */
		if (cntr == mask)
			branch_br_cntr[off + i + base] |= ANNOTATION__BR_CNTR_SATURATED_FLAG;
	}

	return 0;
}
357
/*
 * Account @cycles for the basic block ending at @addr (starting at
 * @start when non-zero) against @sym's cycle histogram, then fold in
 * the branch counters.  Returns 0 on success, -ENOMEM or -ERANGE on
 * failure.
 */
static int symbol__account_cycles(u64 addr, u64 start, struct symbol *sym,
				  unsigned cycles, struct evsel *evsel,
				  u64 br_cntr)
{
	struct annotated_branch *branch;
	unsigned offset;
	int ret;

	if (sym == NULL)
		return 0;
	branch = symbol__find_branch_hist(sym, evsel->evlist->nr_br_cntr);
	if (!branch)
		return -ENOMEM;
	/* Both ends of the block must lie inside the symbol. */
	if (addr < sym->start || addr >= sym->end)
		return -ERANGE;

	if (start) {
		if (start < sym->start || start >= sym->end)
			return -ERANGE;
		/* A start at or after the end address is bogus; treat as unknown. */
		if (start >= addr)
			start = 0;
	}
	offset = addr - sym->start;
	ret = __symbol__account_cycles(branch->cycles_hist,
				       start ? start - sym->start : 0,
				       offset, cycles,
				       !!start);

	if (ret)
		return ret;

	return symbol__account_br_cntr(branch, evsel, offset, br_cntr);
}
391
/*
 * Account a LBR cycle measurement for the basic block from @start to
 * @ams.  The start address is only used when the block is known to lie
 * within a single function, otherwise it is dropped (saddr == 0).
 * Returns 0 on success or a negative error from symbol__account_cycles().
 */
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
				    struct addr_map_symbol *start,
				    unsigned cycles,
				    struct evsel *evsel,
				    u64 br_cntr)
{
	u64 saddr = 0;
	int err;

	if (!cycles)
		return 0;

	/*
	 * Only set start when IPC can be computed. We can only
	 * compute it when the basic block is completely in a single
	 * function.
	 * Special case the case when the jump is elsewhere, but
	 * it starts on the function start.
	 */
	if (start &&
		(start->ms.sym == ams->ms.sym ||
		 (ams->ms.sym &&
		  start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
		saddr = start->al_addr;
	if (saddr == 0)
		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
			ams->addr,
			start ? start->addr : 0,
			ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
			saddr);
	err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles, evsel, br_cntr);
	if (err)
		pr_debug2("account_cycles failed %d\n", err);
	return err;
}
427
/* Linear search for the annotation line at @offset; NULL when absent. */
struct annotation_line *annotated_source__get_line(struct annotated_source *src,
						  s64 offset)
{
	struct annotation_line *pos;

	list_for_each_entry(pos, &src->source, node) {
		if (pos->offset == offset)
			return pos;
	}

	return NULL;
}
439
/* Count instruction lines with offsets in [start, end]; 0 if start is absent. */
static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
{
	unsigned int count = 0;
	struct annotation_line *pos;

	pos = annotated_source__get_line(notes->src, start);
	if (pos == NULL)
		return 0;

	list_for_each_entry_from(pos, &notes->src->source, node) {
		if (pos->offset == -1)	/* source/comment line, not an insn */
			continue;
		if ((u64)pos->offset > end)
			break;
		count++;
	}

	return count;
}
458
annotated_branch__delete(struct annotated_branch * branch)459 static void annotated_branch__delete(struct annotated_branch *branch)
460 {
461 if (branch) {
462 zfree(&branch->cycles_hist);
463 free(branch->br_cntr);
464 free(branch);
465 }
466 }
467
/*
 * Compute the IPC of the basic block [@start, @end] from @ch and spread
 * it over every instruction line in the range that does not have one
 * yet; also accumulate coverage statistics into the annotated branch.
 */
static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
{
	unsigned n_insn;
	unsigned int cover_insn = 0;

	n_insn = annotation__count_insn(notes, start, end);
	if (n_insn && ch->num && ch->cycles) {
		struct annotation_line *al;
		struct annotated_branch *branch;
		/* instructions per cycle = insns / (avg cycles per block run) */
		float ipc = n_insn / ((double)ch->cycles / (double)ch->num);

		/* Hide data when there are too many overlaps. */
		if (ch->reset >= 0x7fff)
			return;

		al = annotated_source__get_line(notes->src, start);
		if (al == NULL)
			return;

		list_for_each_entry_from(al, &notes->src->source, node) {
			if (al->offset == -1)	/* not an instruction line */
				continue;
			if ((u64)al->offset > end)
				break;
			/* First block to cover a line wins (ipc still zero). */
			if (al->cycles && al->cycles->ipc == 0.0) {
				al->cycles->ipc = ipc;
				cover_insn++;
			}
		}

		branch = annotation__get_branch(notes);
		if (cover_insn && branch) {
			branch->hit_cycles += ch->cycles;
			branch->hit_insn += n_insn * ch->num;
			branch->cover_insn += cover_insn;
		}
	}
}
506
/*
 * Walk the cycle histogram from the highest offset down and attach
 * average/min/max cycles, IPC and branch counters to the matching
 * annotation lines.
 *
 * NOTE(review): on allocation failure the partially-attached per-line
 * data is rolled back and 0 is still returned — presumably deliberate
 * best-effort behavior (annotation proceeds without IPC data); confirm
 * whether callers should instead see the error.
 */
static int annotation__compute_ipc(struct annotation *notes, size_t size,
				   struct evsel *evsel)
{
	unsigned int br_cntr_nr = evsel->evlist->nr_br_cntr;
	int err = 0;
	s64 offset;

	if (!notes->branch || !notes->branch->cycles_hist)
		return 0;

	notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
	notes->branch->hit_cycles = 0;
	notes->branch->hit_insn = 0;
	notes->branch->cover_insn = 0;

	annotation__lock(notes);
	/* High-to-low so a block's start line is visited after its end line. */
	for (offset = size - 1; offset >= 0; --offset) {
		struct cyc_hist *ch;

		ch = &notes->branch->cycles_hist[offset];
		if (ch && ch->cycles) {
			struct annotation_line *al;

			al = annotated_source__get_line(notes->src, offset);
			if (al && al->cycles == NULL) {
				al->cycles = zalloc(sizeof(*al->cycles));
				if (al->cycles == NULL) {
					err = ENOMEM;
					break;
				}
			}
			/* Spread this block's IPC over the lines it covers. */
			if (ch->have_start)
				annotation__count_and_fill(notes, ch->start, offset, ch);
			if (al && ch->num_aggr) {
				al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
				al->cycles->max = ch->cycles_max;
				al->cycles->min = ch->cycles_min;
			}
			if (al && notes->branch->br_cntr) {
				if (!al->br_cntr) {
					al->br_cntr = calloc(br_cntr_nr, sizeof(u64));
					if (!al->br_cntr) {
						err = ENOMEM;
						break;
					}
				}
				al->num_aggr = ch->num_aggr;
				al->br_cntr_nr = br_cntr_nr;
				al->evsel = evsel;
				memcpy(al->br_cntr, &notes->branch->br_cntr[offset * br_cntr_nr],
				       br_cntr_nr * sizeof(u64));
			}
		}
	}

	if (err) {
		/* Roll back per-line data attached before the failure point. */
		while (++offset < (s64)size) {
			struct cyc_hist *ch = &notes->branch->cycles_hist[offset];

			if (ch && ch->cycles) {
				struct annotation_line *al;

				al = annotated_source__get_line(notes->src, offset);
				if (al) {
					zfree(&al->cycles);
					zfree(&al->br_cntr);
				}
			}
		}
	}

	annotation__unlock(notes);
	return 0;
}
581
/* Record @sample at the addr_map_symbol's resolved address for @evsel. */
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
				 struct evsel *evsel)
{
	return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
}
587
/* Record @sample at @ip against the hist entry's symbol for @evsel. */
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
				 struct evsel *evsel, u64 ip)
{
	return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
}
593
594
/* Release everything owned by @notes (source lines and branch data). */
void annotation__exit(struct annotation *notes)
{
	annotated_source__delete(notes->src);
	annotated_branch__delete(notes->branch);
}
600
/* Lazily-created pool of mutexes shared by all annotations; see annotation__get_mutex(). */
static struct sharded_mutex *sharded_mutex;

static void annotation__init_sharded_mutex(void)
{
	/* As many mutexes as there are CPUs. */
	sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
}
608
/* Map an annotation to a shard: its address is a good-enough hash. */
static size_t annotation__hash(const struct annotation *notes)
{
	return (size_t)notes;
}
613
/*
 * Return the shard mutex for @notes, initializing the pool exactly once.
 * NULL when the pool allocation failed (locking then degrades to no-op).
 */
static struct mutex *annotation__get_mutex(const struct annotation *notes)
{
	static pthread_once_t once = PTHREAD_ONCE_INIT;

	pthread_once(&once, annotation__init_sharded_mutex);
	if (!sharded_mutex)
		return NULL;

	return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
}
624
/* Lock @notes' shard mutex; silently a no-op if the pool is unavailable. */
void annotation__lock(struct annotation *notes)
	NO_THREAD_SAFETY_ANALYSIS
{
	struct mutex *mutex = annotation__get_mutex(notes);

	if (mutex)
		mutex_lock(mutex);
}
633
/* Unlock @notes' shard mutex; no-op if the pool is unavailable. */
void annotation__unlock(struct annotation *notes)
	NO_THREAD_SAFETY_ANALYSIS
{
	struct mutex *mutex = annotation__get_mutex(notes);

	if (mutex)
		mutex_unlock(mutex);
}
642
/* Try to lock @notes' shard mutex; false when unavailable or contended. */
bool annotation__trylock(struct annotation *notes)
{
	struct mutex *mutex = annotation__get_mutex(notes);

	return mutex ? mutex_trylock(mutex) : false;
}
652
/* Append @al to the tail of @head, preserving disassembly order. */
void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
	list_add_tail(&al->node, head);
}
657
/* Return the next instruction line after @pos (offset >= 0), or NULL. */
struct annotation_line *
annotation_line__next(struct annotation_line *pos, struct list_head *head)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}
667
annotate__address_color(struct block_range * br)668 static const char *annotate__address_color(struct block_range *br)
669 {
670 double cov = block_range__coverage(br);
671
672 if (cov >= 0) {
673 /* mark red for >75% coverage */
674 if (cov > 0.75)
675 return PERF_COLOR_RED;
676
677 /* mark dull for <1% coverage */
678 if (cov < 0.01)
679 return PERF_COLOR_NORMAL;
680 }
681
682 return PERF_COLOR_MAGENTA;
683 }
684
annotate__asm_color(struct block_range * br)685 static const char *annotate__asm_color(struct block_range *br)
686 {
687 double cov = block_range__coverage(br);
688
689 if (cov >= 0) {
690 /* mark dull for <1% coverage */
691 if (cov < 0.01)
692 return PERF_COLOR_NORMAL;
693 }
694
695 return PERF_COLOR_BLUE;
696 }
697
/*
 * Print branch-flow annotations for @addr as a trailing "#" comment:
 * the percentage of control flow joining at this address (+N%), and for
 * branch instructions the percentage leaving plus its prediction ratio
 * (-N% (p:M%)).
 */
static void annotate__branch_printf(struct block_range *br, u64 addr)
{
	bool emit_comment = true;

	if (!br)
		return;

#if 1
	if (br->is_target && br->start == addr) {
		struct block_range *branch = br;
		double p;

		/*
		 * Find matching branch to our target.
		 */
		while (!branch->is_branch)
			branch = block_range__next(branch);

		p = 100 *(double)br->entry / branch->coverage;

		if (p > 0.1) {
			/* Emit the "#" separator at most once per line. */
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage joined at this target in relation
			 * to the next branch.
			 */
			printf(" +%.2f%%", p);
		}
	}
#endif
	if (br->is_branch && br->end == addr) {
		double p = 100*(double)br->taken / br->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage leaving at this branch, and
			 * its prediction ratio.
			 */
			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
		}
	}
}
749
/*
 * Print one disassembly line: colored address, colored asm text, then
 * any branch-flow annotation.  Always returns 0.
 */
static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
{
	s64 offset = dl->al.offset;
	const u64 addr = start + offset;
	struct block_range *br;

	br = block_range__find(addr);
	color_fprintf(stdout, annotate__address_color(br), "  %*" PRIx64 ":", addr_fmt_width, addr);
	color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
	annotate__branch_printf(br, addr);
	return 0;
}
762
763 static struct annotated_data_type *
764 __hist_entry__get_data_type(struct hist_entry *he, const struct arch *arch,
765 struct debuginfo *dbg, struct disasm_line *dl,
766 int *type_offset);
767
needs_type_info(struct annotated_data_type * data_type)768 static bool needs_type_info(struct annotated_data_type *data_type)
769 {
770 if (data_type == NULL || data_type == NO_TYPE)
771 return false;
772
773 if (verbose)
774 return true;
775
776 return (data_type != &stackop_type) && (data_type != &canary_type);
777 }
778
/*
 * Print one annotation line to stdout: per-event percentages (or raw
 * periods/samples), the disassembly, optional data-type info and source
 * path.  When @queue is set, the not-yet-printed context lines before
 * @al are flushed first.
 *
 * Returns 0 when printed, -1 when filtered out (below min_pcnt or a
 * queued non-instruction line), 1 when the max_lines budget is spent.
 */
static int
annotation_line__print(struct annotation_line *al, struct annotation_print_data *apd,
		       struct annotation_options *opts, int printed,
		       struct annotation_line *queue)
{
	struct symbol *sym = apd->he->ms.sym;
	struct disasm_line *dl = container_of(al, struct disasm_line, al);
	struct annotation *notes = symbol__annotation(sym);
	/* static: suppresses repeating the same file:line across calls */
	static const char *prev_line;
	int max_lines = opts->max_lines;
	int percent_type = opts->percent_type;

	if (al->offset != -1) {
		/* Instruction line: compute the hottest event's percentage. */
		double max_percent = 0.0;
		int i, nr_percent = 1;
		const char *color;

		for (i = 0; i < al->data_nr; i++) {
			double percent;

			percent = annotation_data__percent(&al->data[i],
							   percent_type);

			if (percent > max_percent)
				max_percent = percent;
		}

		if (al->data_nr > nr_percent)
			nr_percent = al->data_nr;

		if (max_percent < opts->min_pcnt)
			return -1;

		if (max_lines && printed >= max_lines)
			return 1;

		if (queue != NULL) {
			/* Flush queued context lines up to (not including) @al. */
			struct annotation_options queue_opts = {
				.max_lines = 1,
				.percent_type = percent_type,
			};

			list_for_each_entry_from(queue, &notes->src->source, node) {
				if (queue == al)
					break;
				annotation_line__print(queue, apd, &queue_opts,
						       /*printed=*/0, /*queue=*/NULL);
			}
		}

		color = get_percent_color(max_percent);

		/* One column per event in the group. */
		for (i = 0; i < nr_percent; i++) {
			struct annotation_data *data = &al->data[i];
			double percent;

			percent = annotation_data__percent(data, percent_type);
			color = get_percent_color(percent);

			if (symbol_conf.show_total_period)
				color_fprintf(stdout, color, " %11" PRIu64,
					      data->he.period);
			else if (symbol_conf.show_nr_samples)
				color_fprintf(stdout, color, " %7" PRIu64,
					      data->he.nr_samples);
			else
				color_fprintf(stdout, color, " %7.2f", percent);
		}

		printf(" : ");

		disasm_line__print(dl, notes->src->start, apd->addr_fmt_width);

		/* Optionally resolve and append the accessed data type. */
		if (opts->code_with_type && apd->dbg) {
			struct annotated_data_type *data_type;
			int offset = 0;

			data_type = __hist_entry__get_data_type(apd->he, apd->arch,
								apd->dbg, dl, &offset);
			if (needs_type_info(data_type)) {
				char buf[4096];

				printf("\t\t# data-type: %s",
				       data_type->self.type_name);

				if (data_type != &stackop_type &&
				    data_type != &canary_type)
					printf(" +%#x", offset);

				if (annotated_data_type__get_member_name(data_type,
									 buf,
									 sizeof(buf),
									 offset))
					printf(" (%s)", buf);
			}
		}

		/*
		 * Also color the filename and line if needed, with
		 * the same color than the percentage. Don't print it
		 * twice for close colored addr with the same filename:line
		 */
		if (al->path) {
			if (!prev_line || strcmp(prev_line, al->path)) {
				color_fprintf(stdout, color, " // %s", al->path);
				prev_line = al->path;
			}
		}

		printf("\n");
	} else if (max_lines && printed >= max_lines)
		return 1;
	else {
		/* Source/comment line: print without percentages. */
		int width = annotation__pcnt_width(notes);

		if (queue)
			return -1;

		if (!*al->line)
			printf(" %*s:\n", width, " ");
		else
			printf(" %*s: %-*d %s\n", width, " ", apd->addr_fmt_width,
			       al->line_nr, al->line);
	}

	return 0;
}
906
/*
 * Sum the samples/periods recorded for @evsel in [offset, end) and fill
 * @data with the four percentage flavors (hits/period x local/global).
 */
static void calc_percent(struct annotation *notes,
			 struct evsel *evsel,
			 struct annotation_data *data,
			 s64 offset, s64 end)
{
	struct hists *hists = evsel__hists(evsel);
	struct sym_hist *sym_hist = annotation__histogram(notes, evsel);
	unsigned int hits = 0;
	u64 period = 0;

	/* Accumulate every recorded offset within this instruction's range. */
	while (offset < end) {
		struct sym_hist_entry *entry;

		entry = annotated_source__hist_entry(notes->src, evsel, offset);
		if (entry) {
			hits += entry->nr_samples;
			period += entry->period;
		}
		++offset;
	}

	/* Local = relative to this symbol, global = relative to all samples. */
	if (sym_hist->nr_samples) {
		data->he.period     = period;
		data->he.nr_samples = hits;
		data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
	}

	if (hists->stats.nr_non_filtered_samples)
		data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;

	if (sym_hist->period)
		data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;

	if (hists->stats.total_period)
		data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
}
943
/*
 * For every instruction line, compute per-event percentages for each
 * member of @leader's event group over the range up to the next
 * instruction line (or @len for the last one).
 */
static void annotation__calc_percent(struct annotation *notes,
				     struct evsel *leader, s64 len)
{
	struct annotation_line *al, *next;
	struct evsel *evsel;

	list_for_each_entry(al, &notes->src->source, node) {
		s64 end;
		int i = 0;

		if (al->offset == -1)	/* skip source/comment lines */
			continue;

		next = annotation_line__next(al, &notes->src->source);
		end  = next ? next->offset : len;

		for_each_group_evsel(evsel, leader) {
			struct annotation_data *data;

			BUG_ON(i >= al->data_nr);

			/* Events skipped here must match the nr_events accounting. */
			if (symbol_conf.skip_empty &&
			    evsel__hists(evsel)->stats.nr_samples == 0)
				continue;

			data = &al->data[i++];

			calc_percent(notes, evsel, data, al->offset, end);
		}
	}
}
975
/* Compute per-line, per-event percentages over the whole of @sym. */
void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);

	annotation__calc_percent(notes, evsel, symbol__size(sym));
}
982
thread__get_arch(struct thread * thread,const struct arch ** parch)983 int thread__get_arch(struct thread *thread, const struct arch **parch)
984 {
985 const struct arch *arch;
986 struct machine *machine;
987 uint32_t e_flags;
988 uint16_t e_machine;
989
990 if (!thread) {
991 *parch = NULL;
992 return -1;
993 }
994
995 machine = maps__machine(thread__maps(thread));
996 e_machine = thread__e_machine(thread, machine, &e_flags);
997 arch = arch__find(e_machine, e_flags, machine->env ? machine->env->cpuid : NULL);
998 if (arch == NULL) {
999 pr_err("%s: unsupported arch %d\n", __func__, e_machine);
1000 return errno;
1001 }
1002 if (parch)
1003 *parch = arch;
1004
1005 return 0;
1006 }
1007
/*
 * Disassemble @ms->sym and populate its annotation source lines.
 * A no-op (returns 0) when the symbol was already disassembled.
 * Stores the resolved arch in *parch when @parch is non-NULL.
 * Returns 0 on success, non-zero on failure.
 */
int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
		     const struct arch **parch)
{
	struct symbol *sym = ms->sym;
	struct annotation *notes = symbol__annotation(sym);
	struct annotate_args args = {
		.options	= &annotate_opts,
	};
	const struct arch *arch = NULL;
	int err, nr;

	err = thread__get_arch(ms->thread, &arch);
	if (err)
		return err;

	if (parch)
		*parch = arch;

	/* Already disassembled: nothing to do. */
	if (notes->src && !list_empty(&notes->src->source))
		return 0;

	args.arch = arch;
	args.ms = ms;

	if (notes->src == NULL) {
		notes->src = annotated_source__new();
		if (notes->src == NULL)
			return -1;
	}

	/* Count events in the group that actually have samples. */
	nr = 0;
	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		for_each_group_evsel(pos, evsel) {
			if (symbol_conf.skip_empty &&
			    evsel__hists(pos)->stats.nr_samples == 0)
				continue;
			nr++;
		}
	}
	notes->src->nr_events = nr ? nr : 1;

	/* Base address for printed offsets; full_addr shows raw addresses. */
	if (annotate_opts.full_addr)
		notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
	else
		notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);

	return symbol__disassemble(sym, &args);
}
1058
/*
 * Insert @al into the rbtree @root keyed by source path.  If a node for
 * the same path already exists, fold @al's percentages into it instead
 * of inserting; otherwise initialize @al's percent sums and link it.
 */
static void insert_source_line(struct rb_root *root, struct annotation_line *al)
{
	struct annotation_line *iter;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	unsigned int percent_type = annotate_opts.percent_type;
	int i, ret;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct annotation_line, rb_node);

		ret = strcmp(iter->path, al->path);
		if (ret == 0) {
			/* Same source line: accumulate and don't insert. */
			for (i = 0; i < al->data_nr; i++) {
				iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
										      percent_type);
			}
			return;
		}

		if (ret < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	for (i = 0; i < al->data_nr; i++) {
		al->data[i].percent_sum = annotation_data__percent(&al->data[i],
								   percent_type);
	}

	rb_link_node(&al->rb_node, parent, p);
	rb_insert_color(&al->rb_node, root);
}
1094
cmp_source_line(struct annotation_line * a,struct annotation_line * b)1095 static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
1096 {
1097 int i;
1098
1099 for (i = 0; i < a->data_nr; i++) {
1100 if (a->data[i].percent_sum == b->data[i].percent_sum)
1101 continue;
1102 return a->data[i].percent_sum > b->data[i].percent_sum;
1103 }
1104
1105 return 0;
1106 }
1107
/*
 * Insert @al into @root ordered by per-event percentage (hottest lines
 * to the left, so an in-order walk prints them first).  Duplicate keys
 * are allowed here, unlike insert_source_line().
 */
static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
{
	struct annotation_line *iter;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct annotation_line, rb_node);

		if (cmp_source_line(al, iter))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&al->rb_node, parent, p);
	rb_insert_color(&al->rb_node, root);
}
1127
resort_source_line(struct rb_root * dest_root,struct rb_root * src_root)1128 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1129 {
1130 struct annotation_line *al;
1131 struct rb_node *node;
1132
1133 node = rb_first(src_root);
1134 while (node) {
1135 struct rb_node *next;
1136
1137 al = rb_entry(node, struct annotation_line, rb_node);
1138 next = rb_next(node);
1139 rb_erase(node, src_root);
1140
1141 __resort_source_line(dest_root, al);
1142 node = next;
1143 }
1144 }
1145
print_summary(struct rb_root * root,const char * filename)1146 static void print_summary(struct rb_root *root, const char *filename)
1147 {
1148 struct annotation_line *al;
1149 struct rb_node *node;
1150
1151 printf("\nSorted summary for file %s\n", filename);
1152 printf("----------------------------------------------\n\n");
1153
1154 if (RB_EMPTY_ROOT(root)) {
1155 printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1156 return;
1157 }
1158
1159 node = rb_first(root);
1160 while (node) {
1161 double percent, percent_max = 0.0;
1162 const char *color;
1163 char *path;
1164 int i;
1165
1166 al = rb_entry(node, struct annotation_line, rb_node);
1167 for (i = 0; i < al->data_nr; i++) {
1168 percent = al->data[i].percent_sum;
1169 color = get_percent_color(percent);
1170 color_fprintf(stdout, color, " %7.2f", percent);
1171
1172 if (percent > percent_max)
1173 percent_max = percent;
1174 }
1175
1176 path = al->path;
1177 color = get_percent_color(percent_max);
1178 color_fprintf(stdout, color, " %s\n", path);
1179
1180 node = rb_next(node);
1181 }
1182 }
1183
/*
 * Debug dump (verbose mode): print the raw sample count for every
 * instruction offset in @sym that has hits, followed by the histogram
 * total for @evsel.
 */
static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel);
	u64 len = symbol__size(sym), offset;

	for (offset = 0; offset < len; ++offset) {
		struct sym_hist_entry *entry;

		entry = annotated_source__hist_entry(notes->src, evsel, offset);
		/* Only offsets that actually collected samples are printed. */
		if (entry && entry->nr_samples != 0)
			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
			       sym->start + offset, entry->nr_samples);
	}
	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
}
1200
/*
 * Character width needed to print the largest instruction address,
 * i.e. @start plus the offset of the last line carrying machine code.
 * Returns 0 when no line has a real offset.
 */
static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
{
	struct annotation_line *pos;
	char tmp[32];

	/* Walk backwards: the last asm line has the highest offset. */
	list_for_each_entry_reverse(pos, lines, node) {
		if (pos->offset == -1)
			continue;
		return scnprintf(tmp, sizeof(tmp), "%" PRIx64, start + pos->offset);
	}

	return 0;
}
1213
/*
 * Print the annotated disassembly of @he's symbol to stdout (stdio/TUI-less
 * mode).  Emits a header line, then each annotation line filtered by
 * min_pcnt/max_lines; with --context, up to 'context' surrounding lines are
 * queued and flushed around each line that passes the filter.
 *
 * Returns the number of lines suppressed by max_lines (>= 0), or -ENOMEM.
 */
int hist_entry__annotate_printf(struct hist_entry *he, struct evsel *evsel)
{
	struct map_symbol *ms = &he->ms;
	struct map *map = ms->map;
	struct symbol *sym = ms->sym;
	struct dso *dso = map__dso(map);
	char *filename;
	const char *d_filename;
	const char *evsel_name = evsel__name(evsel);
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel);
	struct annotation_line *pos, *queue = NULL;
	struct annotation_options *opts = &annotate_opts;
	struct annotation_print_data apd = {
		.he = he,
		.evsel = evsel,
	};
	int printed = 2, queue_len = 0;
	int more = 0;
	bool context = opts->context;
	int width = annotation__pcnt_width(notes);
	int graph_dotted_len;
	char buf[512];

	/* Own copy: basename() below may modify its argument. */
	filename = strdup(dso__long_name(dso));
	if (!filename)
		return -ENOMEM;

	if (opts->full_path)
		d_filename = filename;
	else
		d_filename = basename(filename);

	/* For an event group, show the whole group description. */
	if (evsel__is_group_event(evsel)) {
		evsel__group_desc(evsel, buf, sizeof(buf));
		evsel_name = buf;
	}

	graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
				  "percent: %s)\n",
				  width, width, symbol_conf.show_total_period ? "Period" :
				  symbol_conf.show_nr_samples ? "Samples" : "Percent",
				  d_filename, evsel_name, h->nr_samples,
				  percent_type_str(opts->percent_type));

	/* Underline exactly as wide as the header just printed. */
	printf("%-*.*s----\n",
	       graph_dotted_len, graph_dotted_len, graph_dotted_line);

	if (verbose > 0)
		symbol__annotate_hits(sym, evsel);

	apd.addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source,
							      notes->src->start);
	thread__get_arch(ms->thread, &apd.arch);
	apd.dbg = dso__debuginfo(dso);

	list_for_each_entry(pos, &notes->src->source, node) {
		int err;

		/* Start collecting context lines before the next hot line. */
		if (context && queue == NULL) {
			queue = pos;
			queue_len = 0;
		}

		err = annotation_line__print(pos, &apd, opts, printed, queue);

		switch (err) {
		case 0:
			++printed;
			if (context) {
				/* Queued context was flushed along with the line. */
				printed += queue_len;
				queue = NULL;
				queue_len = 0;
			}
			break;
		case 1:
			/* filtered by max_lines */
			++more;
			break;
		case -1:
		default:
			/*
			 * Filtered by min_pcnt or non IP lines when
			 * context != 0
			 */
			if (!context)
				break;
			/* Keep at most 'context' lines queued: slide the window. */
			if (queue_len == context)
				queue = list_entry(queue->node.next, typeof(*queue), node);
			else
				++queue_len;
			break;
		}
	}

	debuginfo__delete(apd.dbg);
	free(filename);

	return more;
}
1314
/* No-op color hook: plain-file output has no percent coloring. */
static void FILE__set_percent_color(void *fp __maybe_unused,
				    double percent __maybe_unused,
				    bool current __maybe_unused)
{
}
1320
/* No-op color hook for jump-source counts in plain-file output. */
static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
					 int nr __maybe_unused, bool current __maybe_unused)
{
	return 0;
}
1326
/* No-op color hook: returns 0 as the "previous color" placeholder. */
static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
{
	return 0;
}
1331
/* printf hook for plain-file output: forward straight to vfprintf(). */
static void FILE__printf(void *fp, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vfprintf(fp, fmt, args);
	va_end(args);
}
1340
FILE__write_graph(void * fp,int graph)1341 static void FILE__write_graph(void *fp, int graph)
1342 {
1343 const char *s;
1344 switch (graph) {
1345
1346 case DARROW_CHAR: s = "↓"; break;
1347 case UARROW_CHAR: s = "↑"; break;
1348 case LARROW_CHAR: s = "←"; break;
1349 case RARROW_CHAR: s = "→"; break;
1350 default: s = "?"; break;
1351 }
1352
1353 fputs(s, fp);
1354 }
1355
/*
 * Write the full annotation of @sym to @fp using the FILE__* write ops
 * (no colors, UTF-8 arrows).  When --code-with-type is set, debuginfo is
 * loaded up front so per-line data-type info can be appended, and freed
 * before returning.  Always returns 0.
 */
static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
				     struct annotation_print_data *apd)
{
	struct annotation *notes = symbol__annotation(sym);
	struct annotation_write_ops wops = {
		.first_line		 = true,
		.obj			 = fp,
		.set_color		 = FILE__set_color,
		.set_percent_color	 = FILE__set_percent_color,
		.set_jumps_percent_color = FILE__set_jumps_percent_color,
		.printf			 = FILE__printf,
		.write_graph		 = FILE__write_graph,
	};
	struct annotation_line *al;

	if (annotate_opts.code_with_type) {
		thread__get_arch(apd->he->ms.thread, &apd->arch);
		apd->dbg = dso__debuginfo(map__dso(apd->he->ms.map));
	}

	list_for_each_entry(al, &notes->src->source, node) {
		if (annotation_line__filter(al))
			continue;
		annotation_line__write(al, notes, &wops, apd);
		fputc('\n', fp);
		/* Column titles are only shown on the very first line. */
		wops.first_line = false;
	}

	if (annotate_opts.code_with_type)
		debuginfo__delete(apd->dbg);

	return 0;
}
1389
map_symbol__annotation_dump(struct map_symbol * ms,struct evsel * evsel,struct hist_entry * he)1390 int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
1391 struct hist_entry *he)
1392 {
1393 const char *ev_name = evsel__name(evsel);
1394 char buf[1024];
1395 char *filename;
1396 int err = -1;
1397 FILE *fp;
1398 struct annotation_print_data apd = {
1399 .he = he,
1400 .evsel = evsel,
1401 };
1402
1403 if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
1404 return -1;
1405
1406 fp = fopen(filename, "w");
1407 if (fp == NULL)
1408 goto out_free_filename;
1409
1410 if (evsel__is_group_event(evsel)) {
1411 evsel__group_desc(evsel, buf, sizeof(buf));
1412 ev_name = buf;
1413 }
1414
1415 fprintf(fp, "%s() %s\nEvent: %s\n\n",
1416 ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
1417 symbol__annotate_fprintf2(ms->sym, fp, &apd);
1418
1419 fclose(fp);
1420 err = 0;
1421 out_free_filename:
1422 free(filename);
1423 return err;
1424 }
1425
/*
 * Zero the sample histogram(s) of @sym.
 *
 * NOTE(review): the memset starts at @evsel's histogram but spans
 * nr_histograms entries — this looks like it assumes @evsel maps to the
 * first histogram (or intends to clear all of them); confirm against
 * annotation__histogram()'s indexing before relying on partial clears.
 */
void symbol__annotate_zero_histogram(struct symbol *sym, struct evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel);

	memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
}
1433
/*
 * Exponentially decay @sym's histogram for @evsel: each per-offset sample
 * count is scaled by 7/8 and the histogram total is rebuilt from the
 * decayed entries.  Used in live modes (e.g. perf top) so old samples
 * fade out over time.
 */
void symbol__annotate_decay_histogram(struct symbol *sym, struct evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel);
	struct annotation_line *al;

	h->nr_samples = 0;
	list_for_each_entry(al, &notes->src->source, node) {
		struct sym_hist_entry *entry;

		/* Source-only lines carry no samples. */
		if (al->offset == -1)
			continue;

		entry = annotated_source__hist_entry(notes->src, evsel, al->offset);
		if (entry == NULL)
			continue;

		entry->nr_samples = entry->nr_samples * 7 / 8;
		h->nr_samples += entry->nr_samples;
	}
}
1455
/*
 * Free every annotation line of @as and reset the "already tried to
 * disassemble" flag so a later annotation attempt starts fresh.
 */
void annotated_source__purge(struct annotated_source *as)
{
	struct annotation_line *al, *n;

	/* _safe variant: each node is deleted while iterating. */
	list_for_each_entry_safe(al, n, &as->source, node) {
		list_del_init(&al->node);
		disasm_line__free(disasm_line(al));
	}
	as->tried_source = false;
}
1466
/*
 * Print one disasm line to @fp: source-only lines (offset == -1) are
 * printed verbatim; asm lines as "offset mnemonic [operands]".
 * Returns the number of characters written.
 */
static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
	size_t printed;

	if (dl->al.offset == -1)
		return fprintf(fp, "%s\n", dl->al.line);

	printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);

	if (dl->ops.raw[0] != '\0') {
		/* Pad the mnemonic to column 6 before the raw operands. */
		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
				   dl->ops.raw);
	}

	return printed + fprintf(fp, "\n");
}
1483
disasm__fprintf(struct list_head * head,FILE * fp)1484 size_t disasm__fprintf(struct list_head *head, FILE *fp)
1485 {
1486 struct disasm_line *pos;
1487 size_t printed = 0;
1488
1489 list_for_each_entry(pos, head, al.node)
1490 printed += disasm_line__fprintf(pos, fp);
1491
1492 return printed;
1493 }
1494
disasm_line__is_valid_local_jump(struct disasm_line * dl,struct symbol * sym)1495 bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
1496 {
1497 if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
1498 !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
1499 dl->ops.target.offset >= (s64)symbol__size(sym))
1500 return false;
1501
1502 return true;
1503 }
1504
/*
 * For every valid local jump in @sym, bump the jump_sources counter on
 * its target line and track the maximum (used to size the jump-count
 * column).  Skipped for PLT stubs, whose targets are external.
 */
static void
annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
{
	struct annotation_line *al;

	/* PLT symbols contain external offsets */
	if (strstr(sym->name, "@plt"))
		return;

	list_for_each_entry(al, &notes->src->source, node) {
		struct disasm_line *dl;
		struct annotation_line *target;

		dl = disasm_line(al);

		if (!disasm_line__is_valid_local_jump(dl, sym))
			continue;

		target = annotated_source__get_line(notes->src,
						    dl->ops.target.offset);
		/*
		 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
		 * have to adjust to the previous offset?
		 */
		if (target == NULL)
			continue;

		if (++target->jump_sources > notes->src->max_jump_sources)
			notes->src->max_jump_sources = target->jump_sources;
	}
}
1536
annotation__set_index(struct annotation * notes)1537 static void annotation__set_index(struct annotation *notes)
1538 {
1539 struct annotation_line *al;
1540 struct annotated_source *src = notes->src;
1541
1542 src->widths.max_line_len = 0;
1543 src->nr_entries = 0;
1544 src->nr_asm_entries = 0;
1545
1546 list_for_each_entry(al, &src->source, node) {
1547 size_t line_len = strlen(al->line);
1548
1549 if (src->widths.max_line_len < line_len)
1550 src->widths.max_line_len = line_len;
1551 al->idx = src->nr_entries++;
1552 if (al->offset != -1)
1553 al->idx_asm = src->nr_asm_entries++;
1554 else
1555 al->idx_asm = -1;
1556 }
1557 }
1558
/* Column width needed to display a jump-source count of @n. */
static inline int width_jumps(int n)
{
	if (n >= 100)
		return 5;
	return (n / 10) ? 2 : 1;
}
1567
annotation__max_ins_name(struct annotation * notes)1568 static int annotation__max_ins_name(struct annotation *notes)
1569 {
1570 int max_name = 0, len;
1571 struct annotation_line *al;
1572
1573 list_for_each_entry(al, ¬es->src->source, node) {
1574 if (al->offset == -1)
1575 continue;
1576
1577 len = strlen(disasm_line(al)->ins.name);
1578 if (max_name < len)
1579 max_name = len;
1580 }
1581
1582 return max_name;
1583 }
1584
/*
 * Seed all column widths from the symbol's size/end address and the
 * previously computed max_jump_sources; called once after disassembly.
 */
static void
annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
	/* Offsets never exceed the symbol size, so they share one width. */
	notes->src->widths.addr = notes->src->widths.target =
		notes->src->widths.min_addr = hex_width(symbol__size(sym));
	notes->src->widths.max_addr = hex_width(sym->end);
	notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources);
	notes->src->widths.max_ins_name = annotation__max_ins_name(notes);
}
1594
annotation__update_column_widths(struct annotation * notes)1595 void annotation__update_column_widths(struct annotation *notes)
1596 {
1597 if (annotate_opts.use_offset)
1598 notes->src->widths.target = notes->src->widths.min_addr;
1599 else if (annotate_opts.full_addr)
1600 notes->src->widths.target = BITS_PER_LONG / 4;
1601 else
1602 notes->src->widths.target = notes->src->widths.max_addr;
1603
1604 notes->src->widths.addr = notes->src->widths.target;
1605
1606 if (annotate_opts.show_nr_jumps)
1607 notes->src->widths.addr += notes->src->widths.jumps + 1;
1608 }
1609
annotation__toggle_full_addr(struct annotation * notes,struct map_symbol * ms)1610 void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
1611 {
1612 annotate_opts.full_addr = !annotate_opts.full_addr;
1613
1614 if (annotate_opts.full_addr)
1615 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1616 else
1617 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1618
1619 annotation__update_column_widths(notes);
1620 }
1621
annotation__calc_lines(struct annotation * notes,struct map_symbol * ms,struct rb_root * root)1622 static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
1623 struct rb_root *root)
1624 {
1625 struct annotation_line *al;
1626 struct rb_root tmp_root = RB_ROOT;
1627
1628 list_for_each_entry(al, ¬es->src->source, node) {
1629 double percent_max = 0.0;
1630 u64 addr;
1631 int i;
1632
1633 for (i = 0; i < al->data_nr; i++) {
1634 double percent;
1635
1636 percent = annotation_data__percent(&al->data[i],
1637 annotate_opts.percent_type);
1638
1639 if (percent > percent_max)
1640 percent_max = percent;
1641 }
1642
1643 if (percent_max <= 0.5)
1644 continue;
1645
1646 addr = map__rip_2objdump(ms->map, ms->sym->start);
1647 al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
1648 false, true, ms->sym->start + al->offset);
1649 insert_source_line(&tmp_root, al);
1650 }
1651
1652 resort_source_line(root, &tmp_root);
1653 }
1654
symbol__calc_lines(struct map_symbol * ms,struct rb_root * root)1655 static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
1656 {
1657 struct annotation *notes = symbol__annotation(ms->sym);
1658
1659 annotation__calc_lines(notes, ms, root);
1660 }
1661
/*
 * TTY annotation using the "2" (write-ops based) output path: annotate
 * the symbol, optionally print the per-source-line summary, print the
 * hists title header and the full annotation to stdout, then free the
 * annotation lines.  Returns 0 on success, -1 on disassembly failure.
 */
int hist_entry__tty_annotate2(struct hist_entry *he, struct evsel *evsel)
{
	struct map_symbol *ms = &he->ms;
	struct dso *dso = map__dso(ms->map);
	struct symbol *sym = ms->sym;
	struct rb_root source_line = RB_ROOT;
	struct hists *hists = evsel__hists(evsel);
	struct annotation_print_data apd = {
		.he = he,
		.evsel = evsel,
	};
	char buf[1024];
	int err;

	err = symbol__annotate2(ms, evsel, NULL);
	if (err) {
		char msg[BUFSIZ];

		/* Remember the failure so we don't warn again for this DSO. */
		dso__set_annotate_warned(dso);
		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
		return -1;
	}

	if (annotate_opts.print_lines) {
		srcline_full_filename = annotate_opts.full_path;
		symbol__calc_lines(ms, &source_line);
		print_summary(&source_line, dso__long_name(dso));
	}

	hists__scnprintf_title(hists, buf, sizeof(buf));
	fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
		buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso));
	symbol__annotate_fprintf2(sym, stdout, &apd);

	annotated_source__purge(symbol__annotation(sym)->src);

	return 0;
}
1701
/*
 * Classic TTY annotation path: annotate the symbol, compute percentages,
 * optionally print the per-source-line summary, print the annotation via
 * hist_entry__annotate_printf(), then free the annotation lines.
 * Returns 0 on success, -1 on disassembly failure.
 */
int hist_entry__tty_annotate(struct hist_entry *he, struct evsel *evsel)
{
	struct map_symbol *ms = &he->ms;
	struct dso *dso = map__dso(ms->map);
	struct symbol *sym = ms->sym;
	struct rb_root source_line = RB_ROOT;
	int err;

	err = symbol__annotate(ms, evsel, NULL);
	if (err) {
		char msg[BUFSIZ];

		/* Remember the failure so we don't warn again for this DSO. */
		dso__set_annotate_warned(dso);
		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
		return -1;
	}

	symbol__calc_percent(sym, evsel);

	if (annotate_opts.print_lines) {
		srcline_full_filename = annotate_opts.full_path;
		symbol__calc_lines(ms, &source_line);
		print_summary(&source_line, dso__long_name(dso));
	}

	hist_entry__annotate_printf(he, evsel);

	annotated_source__purge(symbol__annotation(sym)->src);

	return 0;
}
1734
ui__has_annotation(void)1735 bool ui__has_annotation(void)
1736 {
1737 return use_browser == 1 && perf_hpp_list.sym;
1738 }
1739
1740
annotation_line__max_percent(struct annotation_line * al,unsigned int percent_type)1741 static double annotation_line__max_percent(struct annotation_line *al,
1742 unsigned int percent_type)
1743 {
1744 double percent_max = 0.0;
1745 int i;
1746
1747 for (i = 0; i < al->data_nr; i++) {
1748 double percent;
1749
1750 percent = annotation_data__percent(&al->data[i],
1751 percent_type);
1752
1753 if (percent > percent_max)
1754 percent_max = percent;
1755 }
1756
1757 return percent_max;
1758 }
1759
/*
 * Render one disasm line into @bf, prefixed with a flow-graph glyph:
 * up/down arrow for local jumps, right arrow for calls and out-of-symbol
 * jumps, left arrow for returns, blanks otherwise.  Returns the number
 * of characters the caller should account for (glyph + space = 2 extra).
 */
static int disasm_line__write(struct disasm_line *dl, struct annotation *notes,
			      void *obj, char *bf, size_t size,
			      void (*obj__printf)(void *obj, const char *fmt, ...),
			      void (*obj__write_graph)(void *obj, int graph))
{
	if (dl->ins.ops && dl->ins.ops->scnprintf) {
		if (ins__is_jump(&dl->ins)) {
			bool fwd;

			/* Jumps leaving the symbol are drawn like calls. */
			if (dl->ops.target.outside)
				goto call_like;
			fwd = dl->ops.target.offset > dl->al.offset;
			obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_call(&dl->ins)) {
call_like:
			obj__write_graph(obj, RARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_ret(&dl->ins)) {
			obj__write_graph(obj, LARROW_CHAR);
			obj__printf(obj, " ");
		} else {
			obj__printf(obj, "  ");
		}
	} else {
		obj__printf(obj, "  ");
	}

	return disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
				      notes->src->widths.max_ins_name) + 2;
}
1791
ipc_coverage_string(char * bf,int size,struct annotation * notes)1792 static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
1793 {
1794 double ipc = 0.0, coverage = 0.0;
1795 struct annotated_branch *branch = annotation__get_branch(notes);
1796
1797 if (branch && branch->hit_cycles)
1798 ipc = branch->hit_insn / ((double)branch->hit_cycles);
1799
1800 if (branch && branch->total_insn) {
1801 coverage = branch->cover_insn * 100.0 /
1802 ((double)branch->total_insn);
1803 }
1804
1805 scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
1806 ipc, coverage);
1807 }
1808
/*
 * Build the branch-counter abbreviation legend into *@str (caller frees):
 * one "abbr = event" line per event with branch counters, plus the '-'
 * and '+' legend lines.  With @header, lines are prefixed '#' for report
 * headers.  Returns 0, -ENOTSUP when no branch counters exist, or
 * -ENOMEM on strbuf failure.
 */
int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header)
{
	struct evsel *pos;
	struct strbuf sb;

	if (evsel->evlist->nr_br_cntr <= 0)
		return -ENOTSUP;

	strbuf_init(&sb, /*hint=*/ 0);

	if (header && strbuf_addf(&sb, "# Branch counter abbr list:\n"))
		goto err;

	evlist__for_each_entry(evsel->evlist, pos) {
		/* Only events that actually sample branch counters. */
		if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
			continue;
		if (header && strbuf_addf(&sb, "#"))
			goto err;

		if (strbuf_addf(&sb, "  %s = %s\n", pos->name, pos->abbr_name))
			goto err;
	}

	if (header && strbuf_addf(&sb, "#"))
		goto err;
	if (strbuf_addf(&sb, "  '-' No event occurs\n"))
		goto err;

	if (header && strbuf_addf(&sb, "#"))
		goto err;
	if (strbuf_addf(&sb, "  '+' Event occurrences may be lost due to branch counter saturated\n"))
		goto err;

	*str = strbuf_detach(&sb, NULL);

	return 0;
err:
	strbuf_release(&sb);
	return -ENOMEM;
}
1849
1850 /* Assume the branch counter saturated at 3 */
1851 #define ANNOTATION_BR_CNTR_SATURATION 3
1852
/*
 * Format one line's branch-counter cell into *@str (caller frees).
 * Default mode draws a histogram of abbr characters per counter
 * ('-' none, '+' saturated); verbose mode prints "abbr=count" pairs.
 * @br_cntr holds per-counter totals (with the saturation flag bit),
 * @num_aggr the number of aggregated samples to average over.
 * Returns 0 or -ENOMEM.
 *
 * NOTE(review): @pos is advanced incrementally across loop iterations,
 * relying on counters appearing in evlist order — confirm if reordering
 * events.
 */
int annotation_br_cntr_entry(char **str, int br_cntr_nr,
			     u64 *br_cntr, int num_aggr,
			     struct evsel *evsel)
{
	struct evsel *pos = evsel ? evlist__first(evsel->evlist) : NULL;
	bool saturated = false;
	int i, j, avg, used;
	struct strbuf sb;

	strbuf_init(&sb, /*hint=*/ 0);
	for (i = 0; i < br_cntr_nr; i++) {
		used = 0;
		/* Mask off the saturation flag before averaging. */
		avg = ceil((double)(br_cntr[i] & ~ANNOTATION__BR_CNTR_SATURATED_FLAG) /
			   (double)num_aggr);

		/*
		 * A histogram with the abbr name is displayed by default.
		 * With -v, the exact number of branch counter is displayed.
		 */
		if (verbose) {
			/* Find the event owning branch counter index i. */
			evlist__for_each_entry_from(evsel->evlist, pos) {
				if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
				    (pos->br_cntr_idx == i))
					break;
			}
			if (strbuf_addstr(&sb, pos->abbr_name))
				goto err;

			if (!br_cntr[i]) {
				if (strbuf_addstr(&sb, "=-"))
					goto err;
			} else {
				if (strbuf_addf(&sb, "=%d", avg))
					goto err;
			}
			if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG) {
				if (strbuf_addch(&sb, '+'))
					goto err;
			} else {
				if (strbuf_addch(&sb, ' '))
					goto err;
			}

			if ((i < br_cntr_nr - 1) && strbuf_addch(&sb, ','))
				goto err;
			continue;
		}

		if (strbuf_addch(&sb, '|'))
			goto err;

		if (!br_cntr[i]) {
			if (strbuf_addch(&sb, '-'))
				goto err;
			used++;
		} else {
			/* Find the event owning branch counter index i. */
			evlist__for_each_entry_from(evsel->evlist, pos) {
				if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
				    (pos->br_cntr_idx == i))
					break;
			}
			if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG)
				saturated = true;

			for (j = 0; j < avg; j++, used++) {
				/* Print + if the number of logged events > 3 */
				if (j >= ANNOTATION_BR_CNTR_SATURATION) {
					saturated = true;
					break;
				}
				if (strbuf_addstr(&sb, pos->abbr_name))
					goto err;
			}

			if (saturated) {
				if (strbuf_addch(&sb, '+'))
					goto err;
				used++;
			}
			pos = list_next_entry(pos, core.node);
		}

		/* Pad each cell to a fixed width of SATURATION + 1 chars. */
		for (j = used; j < ANNOTATION_BR_CNTR_SATURATION + 1; j++) {
			if (strbuf_addch(&sb, ' '))
				goto err;
		}
	}

	if (!verbose && strbuf_addch(&sb, br_cntr_nr ? '|' : ' '))
		goto err;

	*str = strbuf_detach(&sb, NULL);

	return 0;
err:
	strbuf_release(&sb);
	return -ENOMEM;
}
1951
/*
 * Cached result of data-type resolution for one instruction offset,
 * stored in annotation_print_data::type_hash so the (expensive) DWARF
 * lookup in __hist_entry__get_data_type() runs once per disasm line.
 */
struct type_hash_entry {
	struct annotated_data_type *type;	/* resolved type, may be NULL */
	int offset;				/* member offset within the type */
};
1956
/*
 * Append a "# data-type: ..." trailer for @dl into @buf when
 * --code-with-type is enabled and the accessed data type can be
 * resolved (via the per-offset cache in apd->type_hash, falling back
 * to a DWARF lookup).  Returns the number of characters written
 * (1, a lone space, when nothing is shown).
 */
static int disasm_line__snprint_type_info(struct disasm_line *dl,
					  char *buf, int len,
					  struct annotation_print_data *apd)
{
	struct annotated_data_type *data_type = NULL;
	struct type_hash_entry *entry = NULL;
	char member[256];
	int offset = 0;
	int printed;

	scnprintf(buf, len, " ");

	if (!annotate_opts.code_with_type || apd->dbg == NULL)
		return 1;

	/* Fast path: a previous call already resolved this offset. */
	if (apd->type_hash) {
		hashmap__find(apd->type_hash, dl->al.offset, &entry);
		if (entry != NULL) {
			data_type = entry->type;
			offset = entry->offset;
		}
	}

	if (data_type == NULL)
		data_type = __hist_entry__get_data_type(apd->he, apd->arch, apd->dbg, dl, &offset);

	/*
	 * Cache the result (even a NULL type, to avoid re-running the
	 * lookup).  NOTE(review): entries are presumably freed when the
	 * hashmap is torn down elsewhere — confirm ownership there.
	 */
	if (apd->type_hash && entry == NULL) {
		entry = malloc(sizeof(*entry));
		if (entry != NULL) {
			entry->type = data_type;
			entry->offset = offset;
			hashmap__add(apd->type_hash, dl->al.offset, entry);
		}
	}

	if (!needs_type_info(data_type))
		return 1;

	printed = scnprintf(buf, len, "\t\t# data-type: %s", data_type->self.type_name);

	/* Pseudo types (stack ops, stack canary) have no meaningful offset. */
	if (data_type != &stackop_type && data_type != &canary_type && len > printed)
		printed += scnprintf(buf + printed, len - printed, " +%#x", offset);

	if (annotated_data_type__get_member_name(data_type, member, sizeof(member), offset) &&
	    len > printed) {
		printed += scnprintf(buf + printed, len - printed, " (%s)", member);
	}
	return printed;
}
2006
/*
 * Render one annotation line through the output hooks in @wops, laying
 * out (left to right): per-event percent/period/sample columns, optional
 * IPC/cycles/branch-counter columns, then either the raw source line or
 * the address + disassembly + optional data-type info.  Column titles
 * are emitted instead of values on the first non-sampled line.
 */
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
			    const struct annotation_write_ops *wops,
			    struct annotation_print_data *apd)
{
	bool current_entry = wops->current_entry;
	bool change_color = wops->change_color;
	double percent_max = annotation_line__max_percent(al, annotate_opts.percent_type);
	int width = wops->width;
	int pcnt_width = annotation__pcnt_width(notes),
	    cycles_width = annotation__cycles_width(notes);
	bool show_title = false;
	char bf[256];
	int printed;
	void *obj = wops->obj;
	int (*obj__set_color)(void *obj, int color) = wops->set_color;
	void (*obj__set_percent_color)(void *obj, double percent, bool current) = wops->set_percent_color;
	int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current) = wops->set_jumps_percent_color;
	void (*obj__printf)(void *obj, const char *fmt, ...) = wops->printf;
	void (*obj__write_graph)(void *obj, int graph) = wops->write_graph;

	/* Show column titles on the first line that carries no values. */
	if (wops->first_line && (al->offset == -1 || percent_max == 0.0)) {
		if (notes->branch && al->cycles) {
			if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
				show_title = true;
		} else
			show_title = true;
	}

	if (al->offset != -1 && percent_max != 0.0) {
		int i;

		/* One column per event: period, sample count, or percent. */
		for (i = 0; i < al->data_nr; i++) {
			double percent;

			percent = annotation_data__percent(&al->data[i],
							   annotate_opts.percent_type);

			obj__set_percent_color(obj, percent, current_entry);
			if (symbol_conf.show_total_period) {
				obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
			} else if (symbol_conf.show_nr_samples) {
				obj__printf(obj, "%7" PRIu64 " ",
					    al->data[i].he.nr_samples);
			} else {
				obj__printf(obj, "%7.2f ", percent);
			}
		}
	} else {
		obj__set_percent_color(obj, 0, current_entry);

		if (!show_title)
			obj__printf(obj, "%-*s", pcnt_width, " ");
		else {
			obj__printf(obj, "%-*s", pcnt_width,
				    symbol_conf.show_total_period ? "Period" :
				    symbol_conf.show_nr_samples ? "Samples" : "Percent");
		}
	}
	width -= pcnt_width;

	/* Optional branch-stack derived columns: IPC, cycles, counters. */
	if (notes->branch) {
		if (al->cycles && al->cycles->ipc)
			obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
		else if (!show_title)
			obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
		else
			obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");

		if (!annotate_opts.show_minmax_cycle) {
			if (al->cycles && al->cycles->avg)
				obj__printf(obj, "%*" PRIu64 " ",
					   ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
			else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__CYCLES_WIDTH, " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__CYCLES_WIDTH - 1,
					    "Cycle");
		} else {
			if (al->cycles) {
				char str[32];

				scnprintf(str, sizeof(str),
					"%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
					al->cycles->avg, al->cycles->min,
					al->cycles->max);

				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    str);
			} else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__MINMAX_CYCLES_WIDTH,
					    " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    "Cycle(min/max)");
		}

		if (annotate_opts.show_br_cntr) {
			if (show_title) {
				obj__printf(obj, "%*s ",
					    ANNOTATION__BR_CNTR_WIDTH,
					    "Branch Counter");
			} else {
				char *buf;

				if (!annotation_br_cntr_entry(&buf, al->br_cntr_nr, al->br_cntr,
							      al->num_aggr, al->evsel)) {
					obj__printf(obj, "%*s ", ANNOTATION__BR_CNTR_WIDTH, buf);
					free(buf);
				}
			}
		}

		/* Average IPC/coverage summary goes on the empty title line. */
		if (show_title && !*al->line) {
			ipc_coverage_string(bf, sizeof(bf), notes);
			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
		}
	}
	width -= cycles_width;

	obj__printf(obj, " ");
	width -= 1;

	if (!*al->line)
		obj__printf(obj, "%-*s", width, " ");
	else if (al->offset == -1) {
		/* Pure source line: optional line number, then the text. */
		if (al->line_nr && annotate_opts.show_linenr)
			printed = scnprintf(bf, sizeof(bf), "%-*d ",
					    notes->src->widths.addr + 1, al->line_nr);
		else
			printed = scnprintf(bf, sizeof(bf), "%-*s  ",
					    notes->src->widths.addr, " ");
		obj__printf(obj, bf);
		width -= printed;
		obj__printf(obj, "%-*s", width, al->line);
	} else {
		u64 addr = al->offset;
		int color = -1;

		if (!annotate_opts.use_offset)
			addr += notes->src->start;

		if (!annotate_opts.use_offset) {
			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
		} else {
			/* Offset mode: what gets shown depends on offset_level. */
			if (al->jump_sources &&
			    annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
				if (annotate_opts.show_nr_jumps) {
					int prev;
					printed = scnprintf(bf, sizeof(bf), "%*d ",
							    notes->src->widths.jumps,
							    al->jump_sources);
					prev = obj__set_jumps_percent_color(obj, al->jump_sources,
									    current_entry);
					obj__printf(obj, bf);
					obj__set_color(obj, prev);
				}
print_addr:
				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
						    notes->src->widths.target, addr);
			} else if (ins__is_call(&disasm_line(al)->ins) &&
				   annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
				goto print_addr;
			} else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
				goto print_addr;
			} else {
				printed = scnprintf(bf, sizeof(bf), "%-*s  ",
						    notes->src->widths.addr, " ");
			}
		}

		if (change_color)
			color = obj__set_color(obj, HE_COLORSET_ADDR);
		obj__printf(obj, bf);
		if (change_color)
			obj__set_color(obj, color);

		width -= printed;

		printed = disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf),
					     obj__printf, obj__write_graph);

		obj__printf(obj, "%s", bf);
		width -= printed;

		disasm_line__snprint_type_info(disasm_line(al), bf, sizeof(bf), apd);
		obj__printf(obj, "%-*s", width, bf);
	}

}
2201
symbol__annotate2(struct map_symbol * ms,struct evsel * evsel,const struct arch ** parch)2202 int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
2203 const struct arch **parch)
2204 {
2205 struct symbol *sym = ms->sym;
2206 struct annotation *notes = symbol__annotation(sym);
2207 size_t size = symbol__size(sym);
2208 int err;
2209
2210 err = symbol__annotate(ms, evsel, parch);
2211 if (err)
2212 return err;
2213
2214 symbol__calc_percent(sym, evsel);
2215
2216 annotation__set_index(notes);
2217 annotation__mark_jump_targets(notes, sym);
2218
2219 err = annotation__compute_ipc(notes, size, evsel);
2220 if (err)
2221 return err;
2222
2223 annotation__init_column_widths(notes, sym);
2224 annotation__update_column_widths(notes);
2225 sym->annotate2 = 1;
2226
2227 return 0;
2228 }
2229
/* Human-readable disassembler names, indexed by enum perf_disassembler. */
const char * const perf_disassembler__strs[] = {
	[PERF_DISASM_UNKNOWN] = "unknown",
	[PERF_DISASM_LLVM] = "llvm",
	[PERF_DISASM_CAPSTONE] = "capstone",
	[PERF_DISASM_OBJDUMP] = "objdump",
};
2236
2237
annotation_options__add_disassembler(struct annotation_options * options,enum perf_disassembler dis)2238 static void annotation_options__add_disassembler(struct annotation_options *options,
2239 enum perf_disassembler dis)
2240 {
2241 for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers); i++) {
2242 if (options->disassemblers[i] == dis) {
2243 /* Disassembler is already present then don't add again. */
2244 return;
2245 }
2246 if (options->disassemblers[i] == PERF_DISASM_UNKNOWN) {
2247 /* Found a free slot. */
2248 options->disassemblers[i] = dis;
2249 return;
2250 }
2251 }
2252 pr_err("Failed to add disassembler %d\n", dis);
2253 }
2254
annotation_options__add_disassemblers_str(struct annotation_options * options,const char * str)2255 static int annotation_options__add_disassemblers_str(struct annotation_options *options,
2256 const char *str)
2257 {
2258 while (str && *str != '\0') {
2259 const char *comma = strchr(str, ',');
2260 int len = comma ? comma - str : (int)strlen(str);
2261 bool match = false;
2262
2263 for (u8 i = 0; i < ARRAY_SIZE(perf_disassembler__strs); i++) {
2264 const char *dis_str = perf_disassembler__strs[i];
2265
2266 if (len == (int)strlen(dis_str) && !strncmp(str, dis_str, len)) {
2267 annotation_options__add_disassembler(options, i);
2268 match = true;
2269 break;
2270 }
2271 }
2272 if (!match) {
2273 pr_err("Invalid disassembler '%.*s'\n", len, str);
2274 return -1;
2275 }
2276 str = comma ? comma + 1 : NULL;
2277 }
2278 return 0;
2279 }
2280
/*
 * perf_config() callback: apply a single "annotate.*" config variable to
 * @data (a struct annotation_options) or to the global symbol_conf.
 * Unknown annotate.* variables are logged at debug level and ignored.
 */
static int annotation__config(const char *var, const char *value, void *data)
{
	struct annotation_options *opt = data;

	if (!strstarts(var, "annotate."))
		return 0;

	if (!strcmp(var, "annotate.offset_level")) {
		perf_config_u8(&opt->offset_level, "offset_level", value);

		/* Clamp to the supported [MIN, MAX] offset-level range. */
		if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
	} else if (!strcmp(var, "annotate.disassemblers")) {
		int err = annotation_options__add_disassemblers_str(opt, value);

		if (err)
			return err;
	} else if (!strcmp(var, "annotate.hide_src_code")) {
		opt->hide_src_code = perf_config_bool("hide_src_code", value);
	} else if (!strcmp(var, "annotate.jump_arrows")) {
		opt->jump_arrows = perf_config_bool("jump_arrows", value);
	} else if (!strcmp(var, "annotate.show_linenr")) {
		opt->show_linenr = perf_config_bool("show_linenr", value);
	} else if (!strcmp(var, "annotate.show_nr_jumps")) {
		opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
	} else if (!strcmp(var, "annotate.show_nr_samples")) {
		symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
								value);
	} else if (!strcmp(var, "annotate.show_total_period")) {
		symbol_conf.show_total_period = perf_config_bool("show_total_period",
								value);
	} else if (!strcmp(var, "annotate.use_offset")) {
		opt->use_offset = perf_config_bool("use_offset", value);
	} else if (!strcmp(var, "annotate.disassembler_style")) {
		/* strdup: config value buffer is owned by the config parser. */
		opt->disassembler_style = strdup(value);
		if (!opt->disassembler_style) {
			pr_err("Not enough memory for annotate.disassembler_style\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.objdump")) {
		opt->objdump_path = strdup(value);
		if (!opt->objdump_path) {
			pr_err("Not enough memory for annotate.objdump\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.addr2line")) {
		symbol_conf.addr2line_path = strdup(value);
		if (!symbol_conf.addr2line_path) {
			pr_err("Not enough memory for annotate.addr2line\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.demangle")) {
		symbol_conf.demangle = perf_config_bool("demangle", value);
	} else if (!strcmp(var, "annotate.demangle_kernel")) {
		symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
	} else {
		pr_debug("%s variable unknown, ignoring...", var);
	}

	return 0;
}
2344
annotation_options__init(void)2345 void annotation_options__init(void)
2346 {
2347 struct annotation_options *opt = &annotate_opts;
2348
2349 memset(opt, 0, sizeof(*opt));
2350
2351 /* Default values. */
2352 opt->use_offset = true;
2353 opt->jump_arrows = true;
2354 opt->annotate_src = true;
2355 opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
2356 opt->percent_type = PERCENT_PERIOD_LOCAL;
2357 opt->hide_src_code = true;
2358 opt->hide_src_code_on_title = true;
2359 }
2360
annotation_options__exit(void)2361 void annotation_options__exit(void)
2362 {
2363 zfree(&annotate_opts.disassembler_style);
2364 zfree(&annotate_opts.objdump_path);
2365 }
2366
annotation_options__default_init_disassemblers(struct annotation_options * options)2367 static void annotation_options__default_init_disassemblers(struct annotation_options *options)
2368 {
2369 if (options->disassemblers[0] != PERF_DISASM_UNKNOWN) {
2370 /* Already initialized. */
2371 return;
2372 }
2373 #ifdef HAVE_LIBLLVM_SUPPORT
2374 annotation_options__add_disassembler(options, PERF_DISASM_LLVM);
2375 #endif
2376 #ifdef HAVE_LIBCAPSTONE_SUPPORT
2377 annotation_options__add_disassembler(options, PERF_DISASM_CAPSTONE);
2378 #endif
2379 annotation_options__add_disassembler(options, PERF_DISASM_OBJDUMP);
2380 }
2381
/* Read annotate.* settings from the perf config and apply the default
 * disassembler list when none was configured. */
void annotation_config__init(void)
{
	perf_config(annotation__config, &annotate_opts);
	annotation_options__default_init_disassemblers(&annotate_opts);
}
2387
parse_percent_type(char * str1,char * str2)2388 static unsigned int parse_percent_type(char *str1, char *str2)
2389 {
2390 unsigned int type = (unsigned int) -1;
2391
2392 if (!strcmp("period", str1)) {
2393 if (!strcmp("local", str2))
2394 type = PERCENT_PERIOD_LOCAL;
2395 else if (!strcmp("global", str2))
2396 type = PERCENT_PERIOD_GLOBAL;
2397 }
2398
2399 if (!strcmp("hits", str1)) {
2400 if (!strcmp("local", str2))
2401 type = PERCENT_HITS_LOCAL;
2402 else if (!strcmp("global", str2))
2403 type = PERCENT_HITS_GLOBAL;
2404 }
2405
2406 return type;
2407 }
2408
/*
 * Option callback for --percent-type.  Accepts "<a>-<b>" with the two
 * words in either order (e.g. "local-period" or "period-local") and
 * stores the parsed type in annotate_opts.percent_type.
 * Returns 0 on success, -1 on a malformed value, -ENOMEM on OOM.
 */
int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
				int unset __maybe_unused)
{
	char *buf, *sep;
	unsigned int type;
	int ret = -1;

	buf = strdup(_str);
	if (buf == NULL)
		return -ENOMEM;

	sep = strchr(buf, '-');
	if (sep == NULL)
		goto out;

	*sep++ = '\0';

	/* Try both orderings of the two words. */
	type = parse_percent_type(buf, sep);
	if (type == (unsigned int) -1)
		type = parse_percent_type(sep, buf);
	if (type != (unsigned int) -1) {
		annotate_opts.percent_type = type;
		ret = 0;
	}

out:
	free(buf);
	return ret;
}
2438
annotate_check_args(void)2439 int annotate_check_args(void)
2440 {
2441 struct annotation_options *args = &annotate_opts;
2442
2443 if (args->prefix_strip && !args->prefix) {
2444 pr_err("--prefix-strip requires --prefix\n");
2445 return -1;
2446 }
2447 return 0;
2448 }
2449
arch__dwarf_regnum(const struct arch * arch,const char * str)2450 static int arch__dwarf_regnum(const struct arch *arch, const char *str)
2451 {
2452 const char *p;
2453 char *regname, *q;
2454 int reg;
2455
2456 p = strchr(str, arch->objdump.register_char);
2457 if (p == NULL)
2458 return -1;
2459
2460 regname = strdup(p);
2461 if (regname == NULL)
2462 return -1;
2463
2464 q = strpbrk(regname, ",) ");
2465 if (q)
2466 *q = '\0';
2467
2468 reg = get_dwarf_regnum(regname, arch->id.e_machine, arch->id.e_flags);
2469 free(regname);
2470 return reg;
2471 }
2472
2473 /*
2474 * Get register number and access offset from the given instruction.
2475 * It assumes AT&T x86 asm format like OFFSET(REG). Maybe it needs
2476 * to revisit the format when it handles different architecture.
2477 * Fills @reg and @offset when return 0.
2478 */
extract_reg_offset(const struct arch * arch,const char * str,struct annotated_op_loc * op_loc)2479 static int extract_reg_offset(const struct arch *arch, const char *str,
2480 struct annotated_op_loc *op_loc)
2481 {
2482 char *p;
2483
2484 if (arch->objdump.register_char == 0)
2485 return -1;
2486
2487 /*
2488 * It should start from offset, but it's possible to skip 0
2489 * in the asm. So 0(%rax) should be same as (%rax).
2490 *
2491 * However, it also start with a segment select register like
2492 * %gs:0x18(%rbx). In that case it should skip the part.
2493 */
2494 if (*str == arch->objdump.register_char) {
2495 if (arch__is_x86(arch)) {
2496 /* FIXME: Handle other segment registers */
2497 if (!strncmp(str, "%gs:", 4))
2498 op_loc->segment = INSN_SEG_X86_GS;
2499 }
2500
2501 while (*str && !isdigit(*str) &&
2502 *str != arch->objdump.memory_ref_char)
2503 str++;
2504 }
2505
2506 op_loc->offset = strtol(str, &p, 0);
2507 op_loc->reg1 = arch__dwarf_regnum(arch, p);
2508 if (op_loc->reg1 == -1)
2509 return -1;
2510
2511 /* Get the second register */
2512 if (op_loc->multi_regs)
2513 op_loc->reg2 = arch__dwarf_regnum(arch, p + 1);
2514
2515 return 0;
2516 }
2517
2518 /**
2519 * annotate_get_insn_location - Get location of instruction
2520 * @arch: the architecture info
2521 * @dl: the target instruction
2522 * @loc: a buffer to save the data
2523 *
2524 * Get detailed location info (register and offset) in the instruction.
2525 * It needs both source and target operand and whether it accesses a
2526 * memory location. The offset field is meaningful only when the
2527 * corresponding mem flag is set. The reg2 field is meaningful only
2528 * when multi_regs flag is set.
2529 *
2530 * Some examples on x86:
2531 *
2532 * mov (%rax), %rcx # src_reg1 = rax, src_mem = 1, src_offset = 0
2533 * # dst_reg1 = rcx, dst_mem = 0
2534 *
2535 * mov 0x18, %r8 # src_reg1 = -1, src_mem = 0
2536 * # dst_reg1 = r8, dst_mem = 0
2537 *
2538 * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
2539 * # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
2540 * # dst_multi_regs = 1, dst_offset = 8
2541 */
int annotate_get_insn_location(const struct arch *arch, struct disasm_line *dl,
			       struct annotated_insn_loc *loc)
{
	struct ins_operands *ops;
	struct annotated_op_loc *op_loc;
	int i;

	/* For "lock"-prefixed insns, inspect the wrapped instruction. */
	if (ins__is_lock(&dl->ins))
		ops = dl->ops.locked.ops;
	else
		ops = &dl->ops;

	if (ops == NULL)
		return -1;

	memset(loc, 0, sizeof(*loc));

	/* Process the source operand first, then the target operand. */
	for_each_insn_op_loc(loc, i, op_loc) {
		const char *insn_str = ops->source.raw;
		bool multi_regs = ops->source.multi_regs;
		bool mem_ref = ops->source.mem_ref;

		if (i == INSN_OP_TARGET) {
			insn_str = ops->target.raw;
			multi_regs = ops->target.multi_regs;
			mem_ref = ops->target.mem_ref;
		}

		/* Invalidate the register by default */
		op_loc->reg1 = -1;
		op_loc->reg2 = -1;

		/*
		 * No raw operand string: skip this operand, except on
		 * powerpc where the registers are decoded from the raw
		 * instruction word below.
		 */
		if (insn_str == NULL) {
			if (!arch__is_powerpc(arch))
				continue;
		}

		/*
		 * For powerpc, call get_powerpc_regs function which extracts the
		 * required fields for op_loc, ie reg1, reg2, offset from the
		 * raw instruction.
		 */
		if (arch__is_powerpc(arch)) {
			op_loc->mem_ref = mem_ref;
			op_loc->multi_regs = multi_regs;
			get_powerpc_regs(dl->raw.raw_insn, !i, op_loc);
		} else if (strchr(insn_str, arch->objdump.memory_ref_char)) {
			/* Memory operand like OFFSET(REG): parse both parts. */
			op_loc->mem_ref = true;
			op_loc->multi_regs = multi_regs;
			extract_reg_offset(arch, insn_str, op_loc);
		} else {
			const char *s = insn_str;
			char *p = NULL;

			if (arch__is_x86(arch)) {
				/* FIXME: Handle other segment registers */
				if (!strncmp(insn_str, "%gs:", 4)) {
					op_loc->segment = INSN_SEG_X86_GS;
					op_loc->offset = strtol(insn_str + 4,
								&p, 0);
					if (p && p != insn_str + 4)
						op_loc->imm = true;
					continue;
				}
			}

			/* Plain register or immediate operand. */
			if (*s == arch->objdump.register_char) {
				op_loc->reg1 = arch__dwarf_regnum(arch, s);
			}
			else if (*s == arch->objdump.imm_char) {
				op_loc->offset = strtol(s + 1, &p, 0);
				if (p && p != s + 1)
					op_loc->imm = true;
			}
		}
	}

	return 0;
}
2621
/*
 * Find the disasm_line for address @ip in @sym's annotation.  When
 * @allow_update is set and the line at @ip is a bare "lock" prefix on
 * its own line (as llvm-objdump emits), keep looking for the following
 * instruction instead.  Returns NULL when no instruction matches.
 *
 * Fix: "&notes" had been corrupted to the mojibake "¬es" (HTML
 * entity damage), which does not compile.
 */
static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
					    bool allow_update)
{
	struct disasm_line *dl;
	struct annotation *notes;

	notes = symbol__annotation(sym);

	list_for_each_entry(dl, &notes->src->source, al.node) {
		/* Skip source/comment lines, which have no offset. */
		if (dl->al.offset == -1)
			continue;

		if (sym->start + dl->al.offset == ip) {
			/*
			 * llvm-objdump places "lock" in a separate line and
			 * in that case, we want to get the next line.
			 */
			if (ins__is_lock(&dl->ins) &&
			    *dl->ops.raw == '\0' && allow_update) {
				ip++;
				continue;
			}
			return dl;
		}
	}
	return NULL;
}
2649
annotate_data_stat(struct list_head * head,const char * name)2650 static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
2651 const char *name)
2652 {
2653 struct annotated_item_stat *istat;
2654
2655 list_for_each_entry(istat, head, list) {
2656 if (!strcmp(istat->name, name))
2657 return istat;
2658 }
2659
2660 istat = zalloc(sizeof(*istat));
2661 if (istat == NULL)
2662 return NULL;
2663
2664 istat->name = strdup(name);
2665 if ((istat->name == NULL) || (!strlen(istat->name))) {
2666 free(istat);
2667 return NULL;
2668 }
2669
2670 list_add_tail(&istat->list, head);
2671 return istat;
2672 }
2673
is_stack_operation(const struct arch * arch,struct disasm_line * dl)2674 static bool is_stack_operation(const struct arch *arch, struct disasm_line *dl)
2675 {
2676 if (arch__is_x86(arch)) {
2677 if (!strncmp(dl->ins.name, "push", 4) ||
2678 !strncmp(dl->ins.name, "pop", 3) ||
2679 !strncmp(dl->ins.name, "call", 4) ||
2680 !strncmp(dl->ins.name, "ret", 3))
2681 return true;
2682 }
2683
2684 return false;
2685 }
2686
is_stack_canary(const struct arch * arch,struct annotated_op_loc * loc)2687 static bool is_stack_canary(const struct arch *arch, struct annotated_op_loc *loc)
2688 {
2689 /* On x86_64, %gs:40 is used for stack canary */
2690 if (arch__is_x86(arch)) {
2691 if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
2692 loc->offset == 40)
2693 return true;
2694 }
2695
2696 return false;
2697 }
2698
2699 /**
2700 * Returns true if the instruction has a memory operand without
2701 * performing a load/store
2702 */
is_address_gen_insn(const struct arch * arch,struct disasm_line * dl)2703 static bool is_address_gen_insn(const struct arch *arch, struct disasm_line *dl)
2704 {
2705 if (arch__is_x86(arch)) {
2706 if (!strncmp(dl->ins.name, "lea", 3))
2707 return true;
2708 }
2709
2710 return false;
2711 }
2712
2713 static struct disasm_line *
annotation__prev_asm_line(struct annotation * notes,struct disasm_line * curr)2714 annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
2715 {
2716 struct list_head *sources = ¬es->src->source;
2717 struct disasm_line *prev;
2718
2719 if (curr == list_first_entry(sources, struct disasm_line, al.node))
2720 return NULL;
2721
2722 prev = list_prev_entry(curr, al.node);
2723 while (prev->al.offset == -1 &&
2724 prev != list_first_entry(sources, struct disasm_line, al.node))
2725 prev = list_prev_entry(prev, al.node);
2726
2727 if (prev->al.offset == -1)
2728 return NULL;
2729
2730 return prev;
2731 }
2732
2733 static struct disasm_line *
annotation__next_asm_line(struct annotation * notes,struct disasm_line * curr)2734 annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
2735 {
2736 struct list_head *sources = ¬es->src->source;
2737 struct disasm_line *next;
2738
2739 if (curr == list_last_entry(sources, struct disasm_line, al.node))
2740 return NULL;
2741
2742 next = list_next_entry(curr, al.node);
2743 while (next->al.offset == -1 &&
2744 next != list_last_entry(sources, struct disasm_line, al.node))
2745 next = list_next_entry(next, al.node);
2746
2747 if (next->al.offset == -1)
2748 return NULL;
2749
2750 return next;
2751 }
2752
/*
 * Compute the objdump-style target address of a PC-relative access in
 * @dl, sampled at runtime address @ip with displacement @offset.
 */
u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
			struct disasm_line *dl)
{
	struct annotation *notes = symbol__annotation(ms->sym);
	struct disasm_line *next;
	u64 addr;

	/*
	 * PC-relative addressing starts from the next instruction address
	 * But the IP is for the current instruction.  Since disasm_line
	 * doesn't have the instruction size, calculate it using the next
	 * disasm_line.  If it's the last one, we can use symbol's end
	 * address directly.
	 */
	next = annotation__next_asm_line(notes, dl);
	addr = next ? ip + (next->al.offset - dl->al.offset) + offset
		    : ms->sym->end + offset;

	return map__rip_2objdump(ms->map, addr);
}
2776
/*
 * Single-entry cache of the most recently used DSO's debug info, so
 * consecutive lookups in the same DSO don't reopen it (see
 * hist_entry__get_data_type()).  Single-threaded use only.
 */
static struct debuginfo_cache {
	struct dso *dso;
	struct debuginfo *dbg;
} di_cache;
2781
debuginfo_cache__delete(void)2782 void debuginfo_cache__delete(void)
2783 {
2784 dso__put(di_cache.dso);
2785 di_cache.dso = NULL;
2786
2787 debuginfo__delete(di_cache.dbg);
2788 di_cache.dbg = NULL;
2789 }
2790
/*
 * Resolve the data type accessed by the instruction @dl sampled in @he,
 * using the DWARF info in @dbg.  On success, *@type_offset is set to the
 * byte offset of the access within the returned type.  Returns NO_TYPE
 * when no type could be found, or NULL to tell the caller to retry with
 * the previous (possibly fused) instruction.
 */
static struct annotated_data_type *
__hist_entry__get_data_type(struct hist_entry *he, const struct arch *arch,
			    struct debuginfo *dbg, struct disasm_line *dl,
			    int *type_offset)
{
	struct map_symbol *ms = &he->ms;
	struct annotated_insn_loc loc;
	struct annotated_op_loc *op_loc;
	struct annotated_data_type *mem_type;
	struct annotated_item_stat *istat;
	int i;

	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
	if (istat == NULL) {
		ann_data_stat.no_insn++;
		return NO_TYPE;
	}

	if (annotate_get_insn_location(arch, dl, &loc) < 0) {
		ann_data_stat.no_insn_ops++;
		istat->bad++;
		return NO_TYPE;
	}

	/* Stack push/pop/call/ret: account to the pseudo "stack op" type. */
	if (is_stack_operation(arch, dl)) {
		istat->good++;
		*type_offset = 0;
		return &stackop_type;
	}

	/* e.g. lea: has a memory operand but performs no actual access. */
	if (is_address_gen_insn(arch, dl)) {
		istat->bad++;
		ann_data_stat.no_mem_ops++;
		return NO_TYPE;
	}

	for_each_insn_op_loc(&loc, i, op_loc) {
		struct data_loc_info dloc = {
			.arch = arch,
			.thread = he->thread,
			.ms = ms,
			/* Recalculate IP because of LOCK prefix or insn fusion */
			.ip = ms->sym->start + dl->al.offset,
			.cpumode = he->cpumode,
			.op = op_loc,
			.di = dbg,
		};

		/* Only memory (or segment-based) accesses carry a data type. */
		if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
			continue;

		/* PC-relative addressing */
		if (op_loc->reg1 == DWARF_REG_PC) {
			dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
							    op_loc->offset, dl);
		}

		/* This CPU access in kernel - pretend PC-relative addressing */
		if (dso__kernel(map__dso(ms->map)) && arch__is_x86(arch) &&
		    op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
			dloc.var_addr = op_loc->offset;
			op_loc->reg1 = DWARF_REG_PC;
		}

		mem_type = find_data_type(&dloc);

		if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
			istat->good++;
			*type_offset = 0;
			return &canary_type;
		}

		if (mem_type)
			istat->good++;
		else
			istat->bad++;

		if (symbol_conf.annotate_data_sample) {
			struct evsel *evsel = hists_to_evsel(he->hists);

			annotated_data_type__update_samples(mem_type, evsel,
							    dloc.type_offset,
							    he->stat.nr_events,
							    he->stat.period);
		}
		*type_offset = dloc.type_offset;
		return mem_type ?: NO_TYPE;
	}

	/* retry with a fused instruction */
	return NULL;
}
2882
/**
 * hist_entry__get_data_type - find data type for given hist entry
 * @he: hist entry
 *
 * This function first annotates the instruction at @he->ip and extracts
 * register and offset info from it. Then it searches the DWARF debug
 * info to get a variable and type information using the address, register,
 * and offset.
 *
 * Return: the resolved data type, or NULL when it cannot be determined
 * (counters in ann_data_stat record why).
 */
struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
{
	struct map_symbol *ms = &he->ms;
	struct evsel *evsel = hists_to_evsel(he->hists);
	const struct arch *arch;
	struct disasm_line *dl;
	struct annotated_data_type *mem_type;
	struct annotated_item_stat *istat;
	u64 ip = he->ip;

	ann_data_stat.total++;

	if (ms->map == NULL || ms->sym == NULL) {
		ann_data_stat.no_sym++;
		return NULL;
	}

	if (!symbol_conf.init_annotation) {
		ann_data_stat.no_sym++;
		return NULL;
	}

	/*
	 * di_cache holds a pair of values, but code below assumes
	 * di_cache.dso can be compared/updated and di_cache.dbg can be
	 * read/updated independently from each other. That assumption only
	 * holds in single threaded code.
	 */
	assert(perf_singlethreaded);

	/* Re-point the debuginfo cache when the sample is in a new DSO. */
	if (map__dso(ms->map) != di_cache.dso) {
		dso__put(di_cache.dso);
		di_cache.dso = dso__get(map__dso(ms->map));

		debuginfo__delete(di_cache.dbg);
		di_cache.dbg = dso__debuginfo(di_cache.dso);
	}

	if (di_cache.dbg == NULL) {
		ann_data_stat.no_dbginfo++;
		return NULL;
	}

	/* Make sure it has the disasm of the function */
	if (symbol__annotate(ms, evsel, &arch) < 0) {
		ann_data_stat.no_insn++;
		return NULL;
	}

	/*
	 * Get a disasm to extract the location from the insn.
	 * This is too slow...
	 */
	dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
	if (dl == NULL) {
		ann_data_stat.no_insn++;
		return NULL;
	}

retry:
	mem_type = __hist_entry__get_data_type(he, arch, di_cache.dbg, dl,
					       &he->mem_type_off);
	if (mem_type)
		return mem_type == NO_TYPE ? NULL : mem_type;

	/*
	 * Some instructions can be fused and the actual memory access came
	 * from the previous instruction.
	 */
	if (dl->al.offset > 0) {
		struct annotation *notes;
		struct disasm_line *prev_dl;

		notes = symbol__annotation(ms->sym);
		prev_dl = annotation__prev_asm_line(notes, dl);

		if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
			dl = prev_dl;
			goto retry;
		}
	}

	ann_data_stat.no_mem_ops++;
	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
	if (istat)
		istat->bad++;
	return NULL;
}
2980
/* Basic block traversal (BFS) data structure */
struct basic_block_data {
	struct list_head queue;		/* blocks discovered but not yet processed */
	struct list_head visited;	/* blocks already processed */
};
2986
/*
 * During the traversal, it needs to know the parent block where the current
 * block started from.  Note that a single basic block can be the parent of
 * two child basic blocks (in case of a conditional jump).
 */
struct basic_block_link {
	struct list_head node;		/* entry in the queue/visited list */
	struct basic_block_link *parent;	/* block this one was reached from */
	struct annotated_basic_block *bb;	/* the basic block itself */
};
2997
2998 /* Check any of basic block in the list already has the offset */
basic_block_has_offset(struct list_head * head,s64 offset)2999 static bool basic_block_has_offset(struct list_head *head, s64 offset)
3000 {
3001 struct basic_block_link *link;
3002
3003 list_for_each_entry(link, head, node) {
3004 s64 begin_offset = link->bb->begin->al.offset;
3005 s64 end_offset = link->bb->end->al.offset;
3006
3007 if (begin_offset <= offset && offset <= end_offset)
3008 return true;
3009 }
3010 return false;
3011 }
3012
is_new_basic_block(struct basic_block_data * bb_data,struct disasm_line * dl)3013 static bool is_new_basic_block(struct basic_block_data *bb_data,
3014 struct disasm_line *dl)
3015 {
3016 s64 offset = dl->al.offset;
3017
3018 if (basic_block_has_offset(&bb_data->visited, offset))
3019 return false;
3020 if (basic_block_has_offset(&bb_data->queue, offset))
3021 return false;
3022 return true;
3023 }
3024
3025 /* Add a basic block starting from dl and link it to the parent */
add_basic_block(struct basic_block_data * bb_data,struct basic_block_link * parent,struct disasm_line * dl)3026 static int add_basic_block(struct basic_block_data *bb_data,
3027 struct basic_block_link *parent,
3028 struct disasm_line *dl)
3029 {
3030 struct annotated_basic_block *bb;
3031 struct basic_block_link *link;
3032
3033 if (dl == NULL)
3034 return -1;
3035
3036 if (!is_new_basic_block(bb_data, dl))
3037 return 0;
3038
3039 bb = zalloc(sizeof(*bb));
3040 if (bb == NULL)
3041 return -1;
3042
3043 bb->begin = dl;
3044 bb->end = dl;
3045 INIT_LIST_HEAD(&bb->list);
3046
3047 link = malloc(sizeof(*link));
3048 if (link == NULL) {
3049 free(bb);
3050 return -1;
3051 }
3052
3053 link->bb = bb;
3054 link->parent = parent;
3055 list_add_tail(&link->node, &bb_data->queue);
3056 return 0;
3057 }
3058
3059 /* Returns true when it finds the target in the current basic block */
process_basic_block(struct basic_block_data * bb_data,struct basic_block_link * link,struct symbol * sym,u64 target)3060 static bool process_basic_block(struct basic_block_data *bb_data,
3061 struct basic_block_link *link,
3062 struct symbol *sym, u64 target)
3063 {
3064 struct disasm_line *dl, *next_dl, *last_dl;
3065 struct annotation *notes = symbol__annotation(sym);
3066 bool found = false;
3067
3068 dl = link->bb->begin;
3069 /* Check if it's already visited */
3070 if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
3071 return false;
3072
3073 last_dl = list_last_entry(¬es->src->source,
3074 struct disasm_line, al.node);
3075 if (last_dl->al.offset == -1)
3076 last_dl = annotation__prev_asm_line(notes, last_dl);
3077
3078 if (last_dl == NULL)
3079 return false;
3080
3081 list_for_each_entry_from(dl, ¬es->src->source, al.node) {
3082 /* Skip comment or debug info line */
3083 if (dl->al.offset == -1)
3084 continue;
3085 /* Found the target instruction */
3086 if (sym->start + dl->al.offset == target) {
3087 found = true;
3088 break;
3089 }
3090 /* End of the function, finish the block */
3091 if (dl == last_dl)
3092 break;
3093 /* 'return' instruction finishes the block */
3094 if (ins__is_ret(&dl->ins))
3095 break;
3096 /* normal instructions are part of the basic block */
3097 if (!ins__is_jump(&dl->ins))
3098 continue;
3099 /* jump to a different function, tail call or return */
3100 if (dl->ops.target.outside)
3101 break;
3102 /* jump instruction creates new basic block(s) */
3103 next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
3104 /*allow_update=*/false);
3105 if (next_dl)
3106 add_basic_block(bb_data, link, next_dl);
3107
3108 /*
3109 * FIXME: determine conditional jumps properly.
3110 * Conditional jumps create another basic block with the
3111 * next disasm line.
3112 */
3113 if (!strstr(dl->ins.name, "jmp")) {
3114 next_dl = annotation__next_asm_line(notes, dl);
3115 if (next_dl)
3116 add_basic_block(bb_data, link, next_dl);
3117 }
3118 break;
3119
3120 }
3121 link->bb->end = dl;
3122 return found;
3123 }
3124
/*
 * Once the target basic block has been found, build a proper linked list
 * of basic blocks by following the parent links recursively.
 */
static void link_found_basic_blocks(struct basic_block_link *link,
				    struct list_head *head)
{
	while (link) {
		struct basic_block_link *parent = link->parent;

		/*
		 * list_move() inserts at the front, so walking dst -> src
		 * leaves @head in src-to-dst order.
		 */
		list_move(&link->bb->list, head);
		list_del(&link->node);
		free(link);

		link = parent;
	}
}
3142
delete_basic_blocks(struct basic_block_data * bb_data)3143 static void delete_basic_blocks(struct basic_block_data *bb_data)
3144 {
3145 struct basic_block_link *link, *tmp;
3146
3147 list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
3148 list_del(&link->node);
3149 zfree(&link->bb);
3150 free(link);
3151 }
3152
3153 list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
3154 list_del(&link->node);
3155 zfree(&link->bb);
3156 free(link);
3157 }
3158 }
3159
3160 /**
3161 * annotate_get_basic_blocks - Get basic blocks for given address range
3162 * @sym: symbol to annotate
3163 * @src: source address
3164 * @dst: destination address
3165 * @head: list head to save basic blocks
3166 *
3167 * This function traverses disasm_lines from @src to @dst and save them in a
3168 * list of annotated_basic_block to @head. It uses BFS to find the shortest
3169 * path between two. The basic_block_link is to maintain parent links so
3170 * that it can build a list of blocks from the start.
3171 */
annotate_get_basic_blocks(struct symbol * sym,s64 src,s64 dst,struct list_head * head)3172 int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
3173 struct list_head *head)
3174 {
3175 struct basic_block_data bb_data = {
3176 .queue = LIST_HEAD_INIT(bb_data.queue),
3177 .visited = LIST_HEAD_INIT(bb_data.visited),
3178 };
3179 struct basic_block_link *link;
3180 struct disasm_line *dl;
3181 int ret = -1;
3182
3183 dl = find_disasm_line(sym, src, /*allow_update=*/false);
3184 if (dl == NULL)
3185 return -1;
3186
3187 if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0)
3188 return -1;
3189
3190 /* Find shortest path from src to dst using BFS */
3191 while (!list_empty(&bb_data.queue)) {
3192 link = list_first_entry(&bb_data.queue, struct basic_block_link, node);
3193
3194 if (process_basic_block(&bb_data, link, sym, dst)) {
3195 link_found_basic_blocks(link, head);
3196 ret = 0;
3197 break;
3198 }
3199 list_move(&link->node, &bb_data.visited);
3200 }
3201 delete_basic_blocks(&bb_data);
3202 return ret;
3203 }
3204