1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4 *
5 * Parts came from builtin-annotate.c, see those files for further
6 * copyright notes.
7 */
8
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <libgen.h>
12 #include <stdlib.h>
13 #include "util.h" // hex_width()
14 #include "ui/ui.h"
15 #include "sort.h"
16 #include "build-id.h"
17 #include "color.h"
18 #include "config.h"
19 #include "disasm.h"
20 #include "dso.h"
21 #include "env.h"
22 #include "map.h"
23 #include "maps.h"
24 #include "symbol.h"
25 #include "srcline.h"
26 #include "units.h"
27 #include "debug.h"
28 #include "debuginfo.h"
29 #include "annotate.h"
30 #include "annotate-data.h"
31 #include "evsel.h"
32 #include "evlist.h"
33 #include "bpf-event.h"
34 #include "bpf-utils.h"
35 #include "block-range.h"
36 #include "string2.h"
37 #include "dwarf-regs.h"
38 #include "util/event.h"
39 #include "util/sharded_mutex.h"
40 #include "arch/common.h"
41 #include "namespaces.h"
42 #include "thread.h"
43 #include "hashmap.h"
44 #include "strbuf.h"
45 #include <regex.h>
46 #include <linux/bitops.h>
47 #include <linux/kernel.h>
48 #include <linux/string.h>
49 #include <linux/zalloc.h>
50 #include <subcmd/parse-options.h>
51 #include <subcmd/run-command.h>
52 #include <math.h>
53
54 /* FIXME: For the HE_COLORSET */
55 #include "ui/browser.h"
56
57 /*
58 * FIXME: Using the same values as slang.h,
59 * but that header may not be available everywhere
60 */
61 #define LARROW_CHAR ((unsigned char)',')
62 #define RARROW_CHAR ((unsigned char)'+')
63 #define DARROW_CHAR ((unsigned char)'.')
64 #define UARROW_CHAR ((unsigned char)'-')
65
66 #include <linux/ctype.h>
67
68 /* global annotation options */
69 struct annotation_options annotate_opts;
70
71 /* Data type collection debug statistics */
72 struct annotated_data_stat ann_data_stat;
73 LIST_HEAD(ann_insn_stat);
74
75 /* Pseudo data types */
76 struct annotated_data_type stackop_type = {
77 .self = {
78 .type_name = (char *)"(stack operation)",
79 .children = LIST_HEAD_INIT(stackop_type.self.children),
80 },
81 };
82
83 struct annotated_data_type canary_type = {
84 .self = {
85 .type_name = (char *)"(stack canary)",
86 .children = LIST_HEAD_INIT(canary_type.self.children),
87 },
88 };
89
90 /* symbol histogram: key = offset << 16 | evsel->core.idx */
91 static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
92 {
93 return (key >> 16) + (key & 0xffff);
94 }
95
96 static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
97 {
98 return key1 == key2;
99 }
100
101 static struct annotated_source *annotated_source__new(void)
102 {
103 struct annotated_source *src = zalloc(sizeof(*src));
104
105 if (src != NULL)
106 INIT_LIST_HEAD(&src->source);
107
108 return src;
109 }
110
111 static __maybe_unused void annotated_source__delete(struct annotated_source *src)
112 {
113 struct hashmap_entry *cur;
114 size_t bkt;
115
116 if (src == NULL)
117 return;
118
119 if (src->samples) {
120 hashmap__for_each_entry(src->samples, cur, bkt)
121 zfree(&cur->pvalue);
122 hashmap__free(src->samples);
123 }
124 zfree(&src->histograms);
125 free(src);
126 }
127
128 static int annotated_source__alloc_histograms(struct annotated_source *src,
129 int nr_hists)
130 {
131 src->nr_histograms = nr_hists;
132 src->histograms = calloc(nr_hists, sizeof(*src->histograms));
133
134 if (src->histograms == NULL)
135 return -1;
136
137 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
138 if (src->samples == NULL)
139 zfree(&src->histograms);
140
141 return src->histograms ? 0 : -1;
142 }
143
144 void symbol__annotate_zero_histograms(struct symbol *sym)
145 {
146 struct annotation *notes = symbol__annotation(sym);
147
148 annotation__lock(notes);
149 if (notes->src != NULL) {
150 memset(notes->src->histograms, 0,
151 notes->src->nr_histograms * sizeof(*notes->src->histograms));
152 hashmap__clear(notes->src->samples);
153 }
154 if (notes->branch && notes->branch->cycles_hist) {
155 memset(notes->branch->cycles_hist, 0,
156 symbol__size(sym) * sizeof(struct cyc_hist));
157 }
158 annotation__unlock(notes);
159 }
160
161 static int __symbol__account_cycles(struct cyc_hist *ch,
162 u64 start,
163 unsigned offset, unsigned cycles,
164 unsigned have_start)
165 {
166 /*
167 * For now we can only account one basic block per
168 * final jump. But multiple could be overlapping.
169 * Always account the longest one. So when
170 * a shorter one has already been seen, throw it away.
171 *
172 * We separately always account the full cycles.
173 */
174 ch[offset].num_aggr++;
175 ch[offset].cycles_aggr += cycles;
176
177 if (cycles > ch[offset].cycles_max)
178 ch[offset].cycles_max = cycles;
179
180 if (ch[offset].cycles_min) {
181 if (cycles && cycles < ch[offset].cycles_min)
182 ch[offset].cycles_min = cycles;
183 } else
184 ch[offset].cycles_min = cycles;
185
186 if (!have_start && ch[offset].have_start)
187 return 0;
188 if (ch[offset].num) {
189 if (have_start && (!ch[offset].have_start ||
190 ch[offset].start > start)) {
191 ch[offset].have_start = 0;
192 ch[offset].cycles = 0;
193 ch[offset].num = 0;
194 if (ch[offset].reset < 0xffff)
195 ch[offset].reset++;
196 } else if (have_start &&
197 ch[offset].start < start)
198 return 0;
199 }
200
201 if (ch[offset].num < NUM_SPARKS)
202 ch[offset].cycles_spark[ch[offset].num] = cycles;
203
204 ch[offset].have_start = have_start;
205 ch[offset].start = start;
206 ch[offset].cycles += cycles;
207 ch[offset].num++;
208 return 0;
209 }
210
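/*
 * Record one sample at @addr for @evsel: bump the per-event histogram
 * totals and the per-offset entry in the samples hashmap.
 */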
211 static int __symbol__inc_addr_samples(struct map_symbol *ms,
212 struct annotated_source *src, struct evsel *evsel, u64 addr,
213 struct perf_sample *sample)
214 {
215 struct symbol *sym = ms->sym;
216 long hash_key;
217 u64 offset;
218 struct sym_hist *h;
219 struct sym_hist_entry *entry;
220
221 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
222
223 if ((addr < sym->start || addr >= sym->end) &&
224 (addr != sym->end || sym->start != sym->end)) {
225 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
226 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
227 return -ERANGE;
228 }
229
230 offset = addr - sym->start;
231 h = annotated_source__histogram(src, evsel);
232 if (h == NULL) {
233 pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
234 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
235 return -ENOMEM;
236 }
237
238 hash_key = offset << 16 | evsel->core.idx;
239 if (!hashmap__find(src->samples, hash_key, &entry)) {
240 entry = zalloc(sizeof(*entry));
241 if (entry == NULL)
242 return -ENOMEM;
243
244 if (hashmap__add(src->samples, hash_key, entry) < 0)
245 return -ENOMEM;
246 }
247
248 h->nr_samples++;
249 h->period += sample->period;
250 entry->nr_samples++;
251 entry->period += sample->period;
252
253 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
254 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
255 sym->start, sym->name, addr, addr - sym->start, evsel->core.idx,
256 entry->nr_samples, entry->period);
257 return 0;
258 }
259
260 struct annotated_branch *annotation__get_branch(struct annotation *notes)
261 {
262 if (notes == NULL)
263 return NULL;
264
265 if (notes->branch == NULL)
266 notes->branch = zalloc(sizeof(*notes->branch));
267
268 return notes->branch;
269 }
270
271 static struct annotated_branch *symbol__find_branch_hist(struct symbol *sym,
272 unsigned int br_cntr_nr)
273 {
274 struct annotation *notes = symbol__annotation(sym);
275 struct annotated_branch *branch;
276 const size_t size = symbol__size(sym);
277
278 branch = annotation__get_branch(notes);
279 if (branch == NULL)
280 return NULL;
281
282 if (branch->cycles_hist == NULL) {
283 branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
284 if (!branch->cycles_hist)
285 return NULL;
286 }
287
288 if (br_cntr_nr && branch->br_cntr == NULL) {
289 branch->br_cntr = calloc(br_cntr_nr * size, sizeof(u64));
290 if (!branch->br_cntr)
291 return NULL;
292 }
293
294 return branch;
295 }
296
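/* Lazily allocate the annotated_source and its per-event histograms for @sym. */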
297 struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
298 {
299 struct annotation *notes = symbol__annotation(sym);
300
301 if (notes->src == NULL) {
302 notes->src = annotated_source__new();
303 if (notes->src == NULL)
304 return NULL;
305 goto alloc_histograms;
306 }
307
308 if (notes->src->histograms == NULL) {
309 alloc_histograms:
310 annotated_source__alloc_histograms(notes->src, nr_hists);
311 }
312
313 return notes->src;
314 }
315
316 static int symbol__inc_addr_samples(struct map_symbol *ms,
317 struct evsel *evsel, u64 addr,
318 struct perf_sample *sample)
319 {
320 struct symbol *sym = ms->sym;
321 struct annotated_source *src;
322
323 if (sym == NULL)
324 return 0;
325 src = symbol__hists(sym, evsel->evlist->core.nr_entries);
326 return src ? __symbol__inc_addr_samples(ms, src, evsel, addr, sample) : 0;
327 }
328
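/*
 * Accumulate the logged branch counter values for the instruction at
 * @offset; each counter is 'width' bits wide in @br_cntr and gets the
 * saturated flag when it reaches its maximum value.
 */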
329 static int symbol__account_br_cntr(struct annotated_branch *branch,
330 struct evsel *evsel,
331 unsigned offset,
332 u64 br_cntr)
333 {
334 unsigned int br_cntr_nr = evsel__leader(evsel)->br_cntr_nr;
335 unsigned int base = evsel__leader(evsel)->br_cntr_idx;
336 unsigned int off = offset * evsel->evlist->nr_br_cntr;
337 u64 *branch_br_cntr = branch->br_cntr;
338 unsigned int i, mask, width;
339
340 if (!br_cntr || !branch_br_cntr)
341 return 0;
342
343 perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width);
344 mask = (1L << width) - 1;
345 for (i = 0; i < br_cntr_nr; i++) {
346 u64 cntr = (br_cntr >> i * width) & mask;
347
348 branch_br_cntr[off + i + base] += cntr;
349 if (cntr == mask)
350 branch_br_cntr[off + i + base] |= ANNOTATION__BR_CNTR_SATURATED_FLAG;
351 }
352
353 return 0;
354 }
355
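/*
 * Account one basic block ending at @addr (optionally starting at @start)
 * to the symbol's cycles histogram and branch counters.
 */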
356 static int symbol__account_cycles(u64 addr, u64 start, struct symbol *sym,
357 unsigned cycles, struct evsel *evsel,
358 u64 br_cntr)
359 {
360 struct annotated_branch *branch;
361 unsigned offset;
362 int ret;
363
364 if (sym == NULL)
365 return 0;
366 branch = symbol__find_branch_hist(sym, evsel->evlist->nr_br_cntr);
367 if (!branch)
368 return -ENOMEM;
369 if (addr < sym->start || addr >= sym->end)
370 return -ERANGE;
371
372 if (start) {
373 if (start < sym->start || start >= sym->end)
374 return -ERANGE;
375 if (start >= addr)
376 start = 0;
377 }
378 offset = addr - sym->start;
379 ret = __symbol__account_cycles(branch->cycles_hist,
380 start ? start - sym->start : 0,
381 offset, cycles,
382 !!start);
383
384 if (ret)
385 return ret;
386
387 return symbol__account_br_cntr(branch, evsel, offset, br_cntr);
388 }
389
390 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
391 struct addr_map_symbol *start,
392 unsigned cycles,
393 struct evsel *evsel,
394 u64 br_cntr)
395 {
396 u64 saddr = 0;
397 int err;
398
399 if (!cycles)
400 return 0;
401
402 /*
403 * Only set start when IPC can be computed. We can only
404 * compute it when the basic block is completely in a single
405 * function.
406 * Special case: the jump target is elsewhere, but the block
407 * starts at the function start.
408 */
409 if (start &&
410 (start->ms.sym == ams->ms.sym ||
411 (ams->ms.sym &&
412 start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
413 saddr = start->al_addr;
414 if (saddr == 0)
415 pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
416 ams->addr,
417 start ? start->addr : 0,
418 ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
419 saddr);
420 err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles, evsel, br_cntr);
421 if (err)
422 pr_debug2("account_cycles failed %d\n", err);
423 return err;
424 }
425
426 struct annotation_line *annotated_source__get_line(struct annotated_source *src,
427 s64 offset)
428 {
429 struct annotation_line *al;
430
431 list_for_each_entry(al, &src->source, node) {
432 if (al->offset == offset)
433 return al;
434 }
435 return NULL;
436 }
437
438 static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
439 {
440 struct annotation_line *al;
441 unsigned n_insn = 0;
442
443 al = annotated_source__get_line(notes->src, start);
444 if (al == NULL)
445 return 0;
446
447 list_for_each_entry_from(al, &notes->src->source, node) {
448 if (al->offset == -1)
449 continue;
450 if ((u64)al->offset > end)
451 break;
452 n_insn++;
453 }
454 return n_insn;
455 }
456
457 static void annotated_branch__delete(struct annotated_branch *branch)
458 {
459 if (branch) {
460 zfree(&branch->cycles_hist);
461 free(branch->br_cntr);
462 free(branch);
463 }
464 }
465
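/*
 * Compute the IPC of the basic block [start, end] and propagate it to
 * every covered instruction line, updating the coverage totals.
 */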
466 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
467 {
468 unsigned n_insn;
469 unsigned int cover_insn = 0;
470
471 n_insn = annotation__count_insn(notes, start, end);
472 if (n_insn && ch->num && ch->cycles) {
473 struct annotation_line *al;
474 struct annotated_branch *branch;
475 float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
476
477 /* Hide data when there are too many overlaps. */
478 if (ch->reset >= 0x7fff)
479 return;
480
481 al = annotated_source__get_line(notes->src, start);
482 if (al == NULL)
483 return;
484
485 list_for_each_entry_from(al, &notes->src->source, node) {
486 if (al->offset == -1)
487 continue;
488 if ((u64)al->offset > end)
489 break;
490 if (al->cycles && al->cycles->ipc == 0.0) {
491 al->cycles->ipc = ipc;
492 cover_insn++;
493 }
494 }
495
496 branch = annotation__get_branch(notes);
497 if (cover_insn && branch) {
498 branch->hit_cycles += ch->cycles;
499 branch->hit_insn += n_insn * ch->num;
500 branch->cover_insn += cover_insn;
501 }
502 }
503 }
504
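/*
 * Walk the cycles histogram and attach per-line IPC, cycle statistics
 * and branch counter data to the annotation lines.
 */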
505 static int annotation__compute_ipc(struct annotation *notes, size_t size,
506 struct evsel *evsel)
507 {
508 unsigned int br_cntr_nr = evsel->evlist->nr_br_cntr;
509 int err = 0;
510 s64 offset;
511
512 if (!notes->branch || !notes->branch->cycles_hist)
513 return 0;
514
515 notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
516 notes->branch->hit_cycles = 0;
517 notes->branch->hit_insn = 0;
518 notes->branch->cover_insn = 0;
519
520 annotation__lock(notes);
521 for (offset = size - 1; offset >= 0; --offset) {
522 struct cyc_hist *ch;
523
524 ch = &notes->branch->cycles_hist[offset];
525 if (ch && ch->cycles) {
526 struct annotation_line *al;
527
528 al = annotated_source__get_line(notes->src, offset);
529 if (al && al->cycles == NULL) {
530 al->cycles = zalloc(sizeof(*al->cycles));
531 if (al->cycles == NULL) {
532 err = ENOMEM;
533 break;
534 }
535 }
536 if (ch->have_start)
537 annotation__count_and_fill(notes, ch->start, offset, ch);
538 if (al && ch->num_aggr) {
539 al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
540 al->cycles->max = ch->cycles_max;
541 al->cycles->min = ch->cycles_min;
542 }
543 if (al && notes->branch->br_cntr) {
544 if (!al->br_cntr) {
545 al->br_cntr = calloc(br_cntr_nr, sizeof(u64));
546 if (!al->br_cntr) {
547 err = ENOMEM;
548 break;
549 }
550 }
551 al->num_aggr = ch->num_aggr;
552 al->br_cntr_nr = br_cntr_nr;
553 al->evsel = evsel;
554 memcpy(al->br_cntr, &notes->branch->br_cntr[offset * br_cntr_nr],
555 br_cntr_nr * sizeof(u64));
556 }
557 }
558 }
559
560 if (err) {
561 while (++offset < (s64)size) {
562 struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
563
564 if (ch && ch->cycles) {
565 struct annotation_line *al;
566
567 al = annotated_source__get_line(notes->src, offset);
568 if (al) {
569 zfree(&al->cycles);
570 zfree(&al->br_cntr);
571 }
572 }
573 }
574 }
575
576 annotation__unlock(notes);
577 return 0;
578 }
579
580 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
581 struct evsel *evsel)
582 {
583 return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
584 }
585
586 int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
587 struct evsel *evsel, u64 ip)
588 {
589 return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
590 }
591
592
593 void annotation__exit(struct annotation *notes)
594 {
595 annotated_source__delete(notes->src);
596 annotated_branch__delete(notes->branch);
597 }
598
599 static struct sharded_mutex *sharded_mutex;
600
601 static void annotation__init_sharded_mutex(void)
602 {
603 /* As many mutexes as there are CPUs. */
604 sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
605 }
606
607 static size_t annotation__hash(const struct annotation *notes)
608 {
609 return (size_t)notes;
610 }
611
612 static struct mutex *annotation__get_mutex(const struct annotation *notes)
613 {
614 static pthread_once_t once = PTHREAD_ONCE_INIT;
615
616 pthread_once(&once, annotation__init_sharded_mutex);
617 if (!sharded_mutex)
618 return NULL;
619
620 return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
621 }
622
623 void annotation__lock(struct annotation *notes)
624 NO_THREAD_SAFETY_ANALYSIS
625 {
626 struct mutex *mutex = annotation__get_mutex(notes);
627
628 if (mutex)
629 mutex_lock(mutex);
630 }
631
632 void annotation__unlock(struct annotation *notes)
633 NO_THREAD_SAFETY_ANALYSIS
634 {
635 struct mutex *mutex = annotation__get_mutex(notes);
636
637 if (mutex)
638 mutex_unlock(mutex);
639 }
640
641 bool annotation__trylock(struct annotation *notes)
642 {
643 struct mutex *mutex = annotation__get_mutex(notes);
644
645 if (!mutex)
646 return false;
647
648 return mutex_trylock(mutex);
649 }
650
651 void annotation_line__add(struct annotation_line *al, struct list_head *head)
652 {
653 list_add_tail(&al->node, head);
654 }
655
656 struct annotation_line *
657 annotation_line__next(struct annotation_line *pos, struct list_head *head)
658 {
659 list_for_each_entry_continue(pos, head, node)
660 if (pos->offset >= 0)
661 return pos;
662
663 return NULL;
664 }
665
666 static const char *annotate__address_color(struct block_range *br)
667 {
668 double cov = block_range__coverage(br);
669
670 if (cov >= 0) {
671 /* mark red for >75% coverage */
672 if (cov > 0.75)
673 return PERF_COLOR_RED;
674
675 /* mark dull for <1% coverage */
676 if (cov < 0.01)
677 return PERF_COLOR_NORMAL;
678 }
679
680 return PERF_COLOR_MAGENTA;
681 }
682
683 static const char *annotate__asm_color(struct block_range *br)
684 {
685 double cov = block_range__coverage(br);
686
687 if (cov >= 0) {
688 /* mark dull for <1% coverage */
689 if (cov < 0.01)
690 return PERF_COLOR_NORMAL;
691 }
692
693 return PERF_COLOR_BLUE;
694 }
695
696 static void annotate__branch_printf(struct block_range *br, u64 addr)
697 {
698 bool emit_comment = true;
699
700 if (!br)
701 return;
702
703 #if 1
704 if (br->is_target && br->start == addr) {
705 struct block_range *branch = br;
706 double p;
707
708 /*
709 * Find matching branch to our target.
710 */
711 while (!branch->is_branch)
712 branch = block_range__next(branch);
713
714 p = 100 *(double)br->entry / branch->coverage;
715
716 if (p > 0.1) {
717 if (emit_comment) {
718 emit_comment = false;
719 printf("\t#");
720 }
721
722 /*
723 * The percentage of coverage joined at this target in relation
724 * to the next branch.
725 */
726 printf(" +%.2f%%", p);
727 }
728 }
729 #endif
730 if (br->is_branch && br->end == addr) {
731 double p = 100*(double)br->taken / br->coverage;
732
733 if (p > 0.1) {
734 if (emit_comment) {
735 emit_comment = false;
736 printf("\t#");
737 }
738
739 /*
740 * The percentage of coverage leaving at this branch, and
741 * its prediction ratio.
742 */
743 printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
744 }
745 }
746 }
747
748 static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
749 {
750 s64 offset = dl->al.offset;
751 const u64 addr = start + offset;
752 struct block_range *br;
753
754 br = block_range__find(addr);
755 color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr);
756 color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
757 annotate__branch_printf(br, addr);
758 return 0;
759 }
760
761 static int
762 annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
763 struct evsel *evsel, u64 len, int min_pcnt, int printed,
764 int max_lines, struct annotation_line *queue, int addr_fmt_width,
765 int percent_type)
766 {
767 struct disasm_line *dl = container_of(al, struct disasm_line, al);
768 struct annotation *notes = symbol__annotation(sym);
769 static const char *prev_line;
770
771 if (al->offset != -1) {
772 double max_percent = 0.0;
773 int i, nr_percent = 1;
774 const char *color;
775
776 for (i = 0; i < al->data_nr; i++) {
777 double percent;
778
779 percent = annotation_data__percent(&al->data[i],
780 percent_type);
781
782 if (percent > max_percent)
783 max_percent = percent;
784 }
785
786 if (al->data_nr > nr_percent)
787 nr_percent = al->data_nr;
788
789 if (max_percent < min_pcnt)
790 return -1;
791
792 if (max_lines && printed >= max_lines)
793 return 1;
794
795 if (queue != NULL) {
796 list_for_each_entry_from(queue, &notes->src->source, node) {
797 if (queue == al)
798 break;
799 annotation_line__print(queue, sym, start, evsel, len,
800 0, 0, 1, NULL, addr_fmt_width,
801 percent_type);
802 }
803 }
804
805 color = get_percent_color(max_percent);
806
807 for (i = 0; i < nr_percent; i++) {
808 struct annotation_data *data = &al->data[i];
809 double percent;
810
811 percent = annotation_data__percent(data, percent_type);
812 color = get_percent_color(percent);
813
814 if (symbol_conf.show_total_period)
815 color_fprintf(stdout, color, " %11" PRIu64,
816 data->he.period);
817 else if (symbol_conf.show_nr_samples)
818 color_fprintf(stdout, color, " %7" PRIu64,
819 data->he.nr_samples);
820 else
821 color_fprintf(stdout, color, " %7.2f", percent);
822 }
823
824 printf(" : ");
825
826 disasm_line__print(dl, start, addr_fmt_width);
827
828 /*
829 * Also color the filename and line if needed, with
830 * the same color as the percentage. Don't print it
831 * twice for nearby colored addresses with the same filename:line.
832 */
833 if (al->path) {
834 if (!prev_line || strcmp(prev_line, al->path)) {
835 color_fprintf(stdout, color, " // %s", al->path);
836 prev_line = al->path;
837 }
838 }
839
840 printf("\n");
841 } else if (max_lines && printed >= max_lines)
842 return 1;
843 else {
844 int width = annotation__pcnt_width(notes);
845
846 if (queue)
847 return -1;
848
849 if (!*al->line)
850 printf(" %*s:\n", width, " ");
851 else
852 printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
853 }
854
855 return 0;
856 }
857
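/*
 * Sum the samples recorded in [offset, end) and turn them into the
 * local/global hits and period percentages for this line.
 */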
858 static void calc_percent(struct annotation *notes,
859 struct evsel *evsel,
860 struct annotation_data *data,
861 s64 offset, s64 end)
862 {
863 struct hists *hists = evsel__hists(evsel);
864 struct sym_hist *sym_hist = annotation__histogram(notes, evsel);
865 unsigned int hits = 0;
866 u64 period = 0;
867
868 while (offset < end) {
869 struct sym_hist_entry *entry;
870
871 entry = annotated_source__hist_entry(notes->src, evsel, offset);
872 if (entry) {
873 hits += entry->nr_samples;
874 period += entry->period;
875 }
876 ++offset;
877 }
878
879 if (sym_hist->nr_samples) {
880 data->he.period = period;
881 data->he.nr_samples = hits;
882 data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
883 }
884
885 if (hists->stats.nr_non_filtered_samples)
886 data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
887
888 if (sym_hist->period)
889 data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
890
891 if (hists->stats.total_period)
892 data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
893 }
894
895 static void annotation__calc_percent(struct annotation *notes,
896 struct evsel *leader, s64 len)
897 {
898 struct annotation_line *al, *next;
899 struct evsel *evsel;
900
901 list_for_each_entry(al, &notes->src->source, node) {
902 s64 end;
903 int i = 0;
904
905 if (al->offset == -1)
906 continue;
907
908 next = annotation_line__next(al, &notes->src->source);
909 end = next ? next->offset : len;
910
911 for_each_group_evsel(evsel, leader) {
912 struct annotation_data *data;
913
914 BUG_ON(i >= al->data_nr);
915
916 if (symbol_conf.skip_empty &&
917 evsel__hists(evsel)->stats.nr_samples == 0)
918 continue;
919
920 data = &al->data[i++];
921
922 calc_percent(notes, evsel, data, al->offset, end);
923 }
924 }
925 }
926
927 void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
928 {
929 struct annotation *notes = symbol__annotation(sym);
930
931 annotation__calc_percent(notes, evsel, symbol__size(sym));
932 }
933
934 static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
935 {
936 struct perf_env *env = evsel__env(evsel);
937 const char *arch_name = perf_env__arch(env);
938 struct arch *arch;
939 int err;
940
941 if (!arch_name) {
942 *parch = NULL;
943 return errno;
944 }
945
946 *parch = arch = arch__find(arch_name);
947 if (arch == NULL) {
948 pr_err("%s: unsupported arch %s\n", __func__, arch_name);
949 return ENOTSUP;
950 }
951
952 if (arch->init) {
953 err = arch->init(arch, env ? env->cpuid : NULL);
954 if (err) {
955 pr_err("%s: failed to initialize %s arch priv area\n",
956 __func__, arch->name);
957 return err;
958 }
959 }
960 return 0;
961 }
962
963 int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
964 struct arch **parch)
965 {
966 struct symbol *sym = ms->sym;
967 struct annotation *notes = symbol__annotation(sym);
968 struct annotate_args args = {
969 .evsel = evsel,
970 .options = &annotate_opts,
971 };
972 struct arch *arch = NULL;
973 int err, nr;
974
975 err = evsel__get_arch(evsel, &arch);
976 if (err < 0)
977 return err;
978
979 if (parch)
980 *parch = arch;
981
982 if (notes->src && !list_empty(¬es->src->source))
983 return 0;
984
985 args.arch = arch;
986 args.ms = *ms;
987
988 if (notes->src == NULL) {
989 notes->src = annotated_source__new();
990 if (notes->src == NULL)
991 return -1;
992 }
993
994 nr = 0;
995 if (evsel__is_group_event(evsel)) {
996 struct evsel *pos;
997
998 for_each_group_evsel(pos, evsel) {
999 if (symbol_conf.skip_empty &&
1000 evsel__hists(pos)->stats.nr_samples == 0)
1001 continue;
1002 nr++;
1003 }
1004 }
1005 notes->src->nr_events = nr ? nr : 1;
1006
1007 if (annotate_opts.full_addr)
1008 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1009 else
1010 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1011
1012 return symbol__disassemble(sym, &args);
1013 }
1014
1015 static void insert_source_line(struct rb_root *root, struct annotation_line *al)
1016 {
1017 struct annotation_line *iter;
1018 struct rb_node **p = &root->rb_node;
1019 struct rb_node *parent = NULL;
1020 unsigned int percent_type = annotate_opts.percent_type;
1021 int i, ret;
1022
1023 while (*p != NULL) {
1024 parent = *p;
1025 iter = rb_entry(parent, struct annotation_line, rb_node);
1026
1027 ret = strcmp(iter->path, al->path);
1028 if (ret == 0) {
1029 for (i = 0; i < al->data_nr; i++) {
1030 iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
1031 percent_type);
1032 }
1033 return;
1034 }
1035
1036 if (ret < 0)
1037 p = &(*p)->rb_left;
1038 else
1039 p = &(*p)->rb_right;
1040 }
1041
1042 for (i = 0; i < al->data_nr; i++) {
1043 al->data[i].percent_sum = annotation_data__percent(&al->data[i],
1044 percent_type);
1045 }
1046
1047 rb_link_node(&al->rb_node, parent, p);
1048 rb_insert_color(&al->rb_node, root);
1049 }
1050
1051 static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
1052 {
1053 int i;
1054
1055 for (i = 0; i < a->data_nr; i++) {
1056 if (a->data[i].percent_sum == b->data[i].percent_sum)
1057 continue;
1058 return a->data[i].percent_sum > b->data[i].percent_sum;
1059 }
1060
1061 return 0;
1062 }
1063
1064 static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
1065 {
1066 struct annotation_line *iter;
1067 struct rb_node **p = &root->rb_node;
1068 struct rb_node *parent = NULL;
1069
1070 while (*p != NULL) {
1071 parent = *p;
1072 iter = rb_entry(parent, struct annotation_line, rb_node);
1073
1074 if (cmp_source_line(al, iter))
1075 p = &(*p)->rb_left;
1076 else
1077 p = &(*p)->rb_right;
1078 }
1079
1080 rb_link_node(&al->rb_node, parent, p);
1081 rb_insert_color(&al->rb_node, root);
1082 }
1083
1084 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1085 {
1086 struct annotation_line *al;
1087 struct rb_node *node;
1088
1089 node = rb_first(src_root);
1090 while (node) {
1091 struct rb_node *next;
1092
1093 al = rb_entry(node, struct annotation_line, rb_node);
1094 next = rb_next(node);
1095 rb_erase(node, src_root);
1096
1097 __resort_source_line(dest_root, al);
1098 node = next;
1099 }
1100 }
1101
1102 static void print_summary(struct rb_root *root, const char *filename)
1103 {
1104 struct annotation_line *al;
1105 struct rb_node *node;
1106
1107 printf("\nSorted summary for file %s\n", filename);
1108 printf("----------------------------------------------\n\n");
1109
1110 if (RB_EMPTY_ROOT(root)) {
1111 printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1112 return;
1113 }
1114
1115 node = rb_first(root);
1116 while (node) {
1117 double percent, percent_max = 0.0;
1118 const char *color;
1119 char *path;
1120 int i;
1121
1122 al = rb_entry(node, struct annotation_line, rb_node);
1123 for (i = 0; i < al->data_nr; i++) {
1124 percent = al->data[i].percent_sum;
1125 color = get_percent_color(percent);
1126 color_fprintf(stdout, color, " %7.2f", percent);
1127
1128 if (percent > percent_max)
1129 percent_max = percent;
1130 }
1131
1132 path = al->path;
1133 color = get_percent_color(percent_max);
1134 color_fprintf(stdout, color, " %s\n", path);
1135
1136 node = rb_next(node);
1137 }
1138 }
1139
1140 static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
1141 {
1142 struct annotation *notes = symbol__annotation(sym);
1143 struct sym_hist *h = annotation__histogram(notes, evsel);
1144 u64 len = symbol__size(sym), offset;
1145
1146 for (offset = 0; offset < len; ++offset) {
1147 struct sym_hist_entry *entry;
1148
1149 entry = annotated_source__hist_entry(notes->src, evsel, offset);
1150 if (entry && entry->nr_samples != 0)
1151 printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
1152 sym->start + offset, entry->nr_samples);
1153 }
1154 printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
1155 }
1156
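/* Width (in hex digits) needed to print the highest instruction address. */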
1157 static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
1158 {
1159 char bf[32];
1160 struct annotation_line *line;
1161
1162 list_for_each_entry_reverse(line, lines, node) {
1163 if (line->offset != -1)
1164 return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
1165 }
1166
1167 return 0;
1168 }
1169
1170 int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
1171 {
1172 struct map *map = ms->map;
1173 struct symbol *sym = ms->sym;
1174 struct dso *dso = map__dso(map);
1175 char *filename;
1176 const char *d_filename;
1177 const char *evsel_name = evsel__name(evsel);
1178 struct annotation *notes = symbol__annotation(sym);
1179 struct sym_hist *h = annotation__histogram(notes, evsel);
1180 struct annotation_line *pos, *queue = NULL;
1181 struct annotation_options *opts = &annotate_opts;
1182 u64 start = map__rip_2objdump(map, sym->start);
1183 int printed = 2, queue_len = 0, addr_fmt_width;
1184 int more = 0;
1185 bool context = opts->context;
1186 u64 len;
1187 int width = annotation__pcnt_width(notes);
1188 int graph_dotted_len;
1189 char buf[512];
1190
1191 filename = strdup(dso__long_name(dso));
1192 if (!filename)
1193 return -ENOMEM;
1194
1195 if (opts->full_path)
1196 d_filename = filename;
1197 else
1198 d_filename = basename(filename);
1199
1200 len = symbol__size(sym);
1201
1202 if (evsel__is_group_event(evsel)) {
1203 evsel__group_desc(evsel, buf, sizeof(buf));
1204 evsel_name = buf;
1205 }
1206
1207 graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
1208 "percent: %s)\n",
1209 width, width, symbol_conf.show_total_period ? "Period" :
1210 symbol_conf.show_nr_samples ? "Samples" : "Percent",
1211 d_filename, evsel_name, h->nr_samples,
1212 percent_type_str(opts->percent_type));
1213
1214 printf("%-*.*s----\n",
1215 graph_dotted_len, graph_dotted_len, graph_dotted_line);
1216
1217 if (verbose > 0)
1218 symbol__annotate_hits(sym, evsel);
1219
1220 addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
1221
1222 list_for_each_entry(pos, &notes->src->source, node) {
1223 int err;
1224
1225 if (context && queue == NULL) {
1226 queue = pos;
1227 queue_len = 0;
1228 }
1229
1230 err = annotation_line__print(pos, sym, start, evsel, len,
1231 opts->min_pcnt, printed, opts->max_lines,
1232 queue, addr_fmt_width, opts->percent_type);
1233
1234 switch (err) {
1235 case 0:
1236 ++printed;
1237 if (context) {
1238 printed += queue_len;
1239 queue = NULL;
1240 queue_len = 0;
1241 }
1242 break;
1243 case 1:
1244 /* filtered by max_lines */
1245 ++more;
1246 break;
1247 case -1:
1248 default:
1249 /*
1250 * Filtered by min_pcnt, or non-IP lines when
1251 * context != 0
1252 */
1253 if (!context)
1254 break;
1255 if (queue_len == context)
1256 queue = list_entry(queue->node.next, typeof(*queue), node);
1257 else
1258 ++queue_len;
1259 break;
1260 }
1261 }
1262
1263 free(filename);
1264
1265 return more;
1266 }
1267
1268 static void FILE__set_percent_color(void *fp __maybe_unused,
1269 double percent __maybe_unused,
1270 bool current __maybe_unused)
1271 {
1272 }
1273
1274 static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
1275 int nr __maybe_unused, bool current __maybe_unused)
1276 {
1277 return 0;
1278 }
1279
1280 static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
1281 {
1282 return 0;
1283 }
1284
1285 static void FILE__printf(void *fp, const char *fmt, ...)
1286 {
1287 va_list args;
1288
1289 va_start(args, fmt);
1290 vfprintf(fp, fmt, args);
1291 va_end(args);
1292 }
1293
1294 static void FILE__write_graph(void *fp, int graph)
1295 {
1296 const char *s;
1297 switch (graph) {
1298
1299 case DARROW_CHAR: s = "↓"; break;
1300 case UARROW_CHAR: s = "↑"; break;
1301 case LARROW_CHAR: s = "←"; break;
1302 case RARROW_CHAR: s = "→"; break;
1303 default: s = "?"; break;
1304 }
1305
1306 fputs(s, fp);
1307 }
1308
1309 static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
1310 {
1311 struct annotation *notes = symbol__annotation(sym);
1312 struct annotation_write_ops wops = {
1313 .first_line = true,
1314 .obj = fp,
1315 .set_color = FILE__set_color,
1316 .set_percent_color = FILE__set_percent_color,
1317 .set_jumps_percent_color = FILE__set_jumps_percent_color,
1318 .printf = FILE__printf,
1319 .write_graph = FILE__write_graph,
1320 };
1321 struct annotation_line *al;
1322
1323 list_for_each_entry(al, &notes->src->source, node) {
1324 if (annotation_line__filter(al))
1325 continue;
1326 annotation_line__write(al, notes, &wops);
1327 fputc('\n', fp);
1328 wops.first_line = false;
1329 }
1330
1331 return 0;
1332 }
1333
1334 int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
1335 {
1336 const char *ev_name = evsel__name(evsel);
1337 char buf[1024];
1338 char *filename;
1339 int err = -1;
1340 FILE *fp;
1341
1342 if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
1343 return -1;
1344
1345 fp = fopen(filename, "w");
1346 if (fp == NULL)
1347 goto out_free_filename;
1348
1349 if (evsel__is_group_event(evsel)) {
1350 evsel__group_desc(evsel, buf, sizeof(buf));
1351 ev_name = buf;
1352 }
1353
1354 fprintf(fp, "%s() %s\nEvent: %s\n\n",
1355 ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
1356 symbol__annotate_fprintf2(ms->sym, fp);
1357
1358 fclose(fp);
1359 err = 0;
1360 out_free_filename:
1361 free(filename);
1362 return err;
1363 }
1364
1365 void symbol__annotate_zero_histogram(struct symbol *sym, struct evsel *evsel)
1366 {
1367 struct annotation *notes = symbol__annotation(sym);
1368 struct sym_hist *h = annotation__histogram(notes, evsel);
1369
1370 memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
1371 }
1372
1373 void symbol__annotate_decay_histogram(struct symbol *sym, struct evsel *evsel)
1374 {
1375 struct annotation *notes = symbol__annotation(sym);
1376 struct sym_hist *h = annotation__histogram(notes, evsel);
1377 struct annotation_line *al;
1378
1379 h->nr_samples = 0;
1380 list_for_each_entry(al, &notes->src->source, node) {
1381 struct sym_hist_entry *entry;
1382
1383 if (al->offset == -1)
1384 continue;
1385
1386 entry = annotated_source__hist_entry(notes->src, evsel, al->offset);
1387 if (entry == NULL)
1388 continue;
1389
1390 entry->nr_samples = entry->nr_samples * 7 / 8;
1391 h->nr_samples += entry->nr_samples;
1392 }
1393 }
1394
1395 void annotated_source__purge(struct annotated_source *as)
1396 {
1397 struct annotation_line *al, *n;
1398
1399 list_for_each_entry_safe(al, n, &as->source, node) {
1400 list_del_init(&al->node);
1401 disasm_line__free(disasm_line(al));
1402 }
1403 }
1404
1405 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1406 {
1407 size_t printed;
1408
1409 if (dl->al.offset == -1)
1410 return fprintf(fp, "%s\n", dl->al.line);
1411
1412 printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
1413
1414 if (dl->ops.raw[0] != '\0') {
1415 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1416 dl->ops.raw);
1417 }
1418
1419 return printed + fprintf(fp, "\n");
1420 }
1421
1422 size_t disasm__fprintf(struct list_head *head, FILE *fp)
1423 {
1424 struct disasm_line *pos;
1425 size_t printed = 0;
1426
1427 list_for_each_entry(pos, head, al.node)
1428 printed += disasm_line__fprintf(pos, fp);
1429
1430 return printed;
1431 }
1432
1433 bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
1434 {
1435 if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
1436 !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
1437 dl->ops.target.offset >= (s64)symbol__size(sym))
1438 return false;
1439
1440 return true;
1441 }
1442
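/*
 * Count, for every line, how many local jumps target it and remember the
 * maximum, which sizes the jump-sources column.
 */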
1443 static void
1444 annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
1445 {
1446 struct annotation_line *al;
1447
1448 /* PLT symbols contain external offsets */
1449 if (strstr(sym->name, "@plt"))
1450 return;
1451
1452 list_for_each_entry(al, &notes->src->source, node) {
1453 struct disasm_line *dl;
1454 struct annotation_line *target;
1455
1456 dl = disasm_line(al);
1457
1458 if (!disasm_line__is_valid_local_jump(dl, sym))
1459 continue;
1460
1461 target = annotated_source__get_line(notes->src,
1462 dl->ops.target.offset);
1463 /*
1464 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
1465 * have to adjust to the previous offset?
1466 */
1467 if (target == NULL)
1468 continue;
1469
1470 if (++target->jump_sources > notes->src->max_jump_sources)
1471 notes->src->max_jump_sources = target->jump_sources;
1472 }
1473 }
1474
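/* Assign display indices (all lines vs. asm-only lines) and track the longest line. */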
1475 static void annotation__set_index(struct annotation *notes)
1476 {
1477 struct annotation_line *al;
1478 struct annotated_source *src = notes->src;
1479
1480 src->widths.max_line_len = 0;
1481 src->nr_entries = 0;
1482 src->nr_asm_entries = 0;
1483
1484 list_for_each_entry(al, &src->source, node) {
1485 size_t line_len = strlen(al->line);
1486
1487 if (src->widths.max_line_len < line_len)
1488 src->widths.max_line_len = line_len;
1489 al->idx = src->nr_entries++;
1490 if (al->offset != -1)
1491 al->idx_asm = src->nr_asm_entries++;
1492 else
1493 al->idx_asm = -1;
1494 }
1495 }
1496
1497 static inline int width_jumps(int n)
1498 {
1499 if (n >= 100)
1500 return 5;
1501 if (n / 10)
1502 return 2;
1503 return 1;
1504 }
1505
1506 static int annotation__max_ins_name(struct annotation *notes)
1507 {
1508 int max_name = 0, len;
1509 struct annotation_line *al;
1510
1511 list_for_each_entry(al, &notes->src->source, node) {
1512 if (al->offset == -1)
1513 continue;
1514
1515 len = strlen(disasm_line(al)->ins.name);
1516 if (max_name < len)
1517 max_name = len;
1518 }
1519
1520 return max_name;
1521 }
1522
1523 static void
1524 annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
1525 {
1526 notes->src->widths.addr = notes->src->widths.target =
1527 notes->src->widths.min_addr = hex_width(symbol__size(sym));
1528 notes->src->widths.max_addr = hex_width(sym->end);
1529 notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources);
1530 notes->src->widths.max_ins_name = annotation__max_ins_name(notes);
1531 }
1532
1533 void annotation__update_column_widths(struct annotation *notes)
1534 {
1535 if (annotate_opts.use_offset)
1536 notes->src->widths.target = notes->src->widths.min_addr;
1537 else if (annotate_opts.full_addr)
1538 notes->src->widths.target = BITS_PER_LONG / 4;
1539 else
1540 notes->src->widths.target = notes->src->widths.max_addr;
1541
1542 notes->src->widths.addr = notes->src->widths.target;
1543
1544 if (annotate_opts.show_nr_jumps)
1545 notes->src->widths.addr += notes->src->widths.jumps + 1;
1546 }
1547
1548 void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
1549 {
1550 annotate_opts.full_addr = !annotate_opts.full_addr;
1551
1552 if (annotate_opts.full_addr)
1553 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1554 else
1555 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1556
1557 annotation__update_column_widths(notes);
1558 }
1559
1560 static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
1561 struct rb_root *root)
1562 {
1563 struct annotation_line *al;
1564 struct rb_root tmp_root = RB_ROOT;
1565
1566 list_for_each_entry(al, &notes->src->source, node) {
1567 double percent_max = 0.0;
1568 u64 addr;
1569 int i;
1570
1571 for (i = 0; i < al->data_nr; i++) {
1572 double percent;
1573
1574 percent = annotation_data__percent(&al->data[i],
1575 annotate_opts.percent_type);
1576
1577 if (percent > percent_max)
1578 percent_max = percent;
1579 }
1580
1581 if (percent_max <= 0.5)
1582 continue;
1583
1584 addr = map__rip_2objdump(ms->map, ms->sym->start);
1585 al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
1586 false, true, ms->sym->start + al->offset);
1587 insert_source_line(&tmp_root, al);
1588 }
1589
1590 resort_source_line(root, &tmp_root);
1591 }
1592
1593 static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
1594 {
1595 struct annotation *notes = symbol__annotation(ms->sym);
1596
1597 annotation__calc_lines(notes, ms, root);
1598 }
1599
1600 int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
1601 {
1602 struct dso *dso = map__dso(ms->map);
1603 struct symbol *sym = ms->sym;
1604 struct rb_root source_line = RB_ROOT;
1605 struct hists *hists = evsel__hists(evsel);
1606 char buf[1024];
1607 int err;
1608
1609 err = symbol__annotate2(ms, evsel, NULL);
1610 if (err) {
1611 char msg[BUFSIZ];
1612
1613 dso__set_annotate_warned(dso);
1614 symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1615 ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1616 return -1;
1617 }
1618
1619 if (annotate_opts.print_lines) {
1620 srcline_full_filename = annotate_opts.full_path;
1621 symbol__calc_lines(ms, &source_line);
1622 print_summary(&source_line, dso__long_name(dso));
1623 }
1624
1625 hists__scnprintf_title(hists, buf, sizeof(buf));
1626 fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
1627 buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso));
1628 symbol__annotate_fprintf2(sym, stdout);
1629
1630 annotated_source__purge(symbol__annotation(sym)->src);
1631
1632 return 0;
1633 }
1634
1635 int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
1636 {
1637 struct dso *dso = map__dso(ms->map);
1638 struct symbol *sym = ms->sym;
1639 struct rb_root source_line = RB_ROOT;
1640 int err;
1641
1642 err = symbol__annotate(ms, evsel, NULL);
1643 if (err) {
1644 char msg[BUFSIZ];
1645
1646 dso__set_annotate_warned(dso);
1647 symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1648 ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1649 return -1;
1650 }
1651
1652 symbol__calc_percent(sym, evsel);
1653
1654 if (annotate_opts.print_lines) {
1655 srcline_full_filename = annotate_opts.full_path;
1656 symbol__calc_lines(ms, &source_line);
1657 print_summary(&source_line, dso__long_name(dso));
1658 }
1659
1660 symbol__annotate_printf(ms, evsel);
1661
1662 annotated_source__purge(symbol__annotation(sym)->src);
1663
1664 return 0;
1665 }
1666
1667 bool ui__has_annotation(void)
1668 {
1669 return use_browser == 1 && perf_hpp_list.sym;
1670 }
1671
1672
1673 static double annotation_line__max_percent(struct annotation_line *al,
1674 unsigned int percent_type)
1675 {
1676 double percent_max = 0.0;
1677 int i;
1678
1679 for (i = 0; i < al->data_nr; i++) {
1680 double percent;
1681
1682 percent = annotation_data__percent(&al->data[i],
1683 percent_type);
1684
1685 if (percent > percent_max)
1686 percent_max = percent;
1687 }
1688
1689 return percent_max;
1690 }
1691
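/*
 * Write the arrow glyph for jump/call/ret instructions, then the
 * formatted instruction text.
 */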
1692 static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
1693 void *obj, char *bf, size_t size,
1694 void (*obj__printf)(void *obj, const char *fmt, ...),
1695 void (*obj__write_graph)(void *obj, int graph))
1696 {
1697 if (dl->ins.ops && dl->ins.ops->scnprintf) {
1698 if (ins__is_jump(&dl->ins)) {
1699 bool fwd;
1700
1701 if (dl->ops.target.outside)
1702 goto call_like;
1703 fwd = dl->ops.target.offset > dl->al.offset;
1704 obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
1705 obj__printf(obj, " ");
1706 } else if (ins__is_call(&dl->ins)) {
1707 call_like:
1708 obj__write_graph(obj, RARROW_CHAR);
1709 obj__printf(obj, " ");
1710 } else if (ins__is_ret(&dl->ins)) {
1711 obj__write_graph(obj, LARROW_CHAR);
1712 obj__printf(obj, " ");
1713 } else {
1714 obj__printf(obj, " ");
1715 }
1716 } else {
1717 obj__printf(obj, " ");
1718 }
1719
1720 disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
1721 notes->src->widths.max_ins_name);
1722 }
1723
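/* Format the "(Average IPC: ..., IPC Coverage: ...)" summary string. */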
1724 static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
1725 {
1726 double ipc = 0.0, coverage = 0.0;
1727 struct annotated_branch *branch = annotation__get_branch(notes);
1728
1729 if (branch && branch->hit_cycles)
1730 ipc = branch->hit_insn / ((double)branch->hit_cycles);
1731
1732 if (branch && branch->total_insn) {
1733 coverage = branch->cover_insn * 100.0 /
1734 ((double)branch->total_insn);
1735 }
1736
1737 scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
1738 ipc, coverage);
1739 }
1740
1741 int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header)
1742 {
1743 struct evsel *pos;
1744 struct strbuf sb;
1745
1746 if (evsel->evlist->nr_br_cntr <= 0)
1747 return -ENOTSUP;
1748
1749 strbuf_init(&sb, /*hint=*/ 0);
1750
1751 if (header && strbuf_addf(&sb, "# Branch counter abbr list:\n"))
1752 goto err;
1753
1754 evlist__for_each_entry(evsel->evlist, pos) {
1755 if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
1756 continue;
1757 if (header && strbuf_addf(&sb, "#"))
1758 goto err;
1759
1760 if (strbuf_addf(&sb, " %s = %s\n", pos->name, pos->abbr_name))
1761 goto err;
1762 }
1763
1764 if (header && strbuf_addf(&sb, "#"))
1765 goto err;
1766 if (strbuf_addf(&sb, " '-' No event occurs\n"))
1767 goto err;
1768
1769 if (header && strbuf_addf(&sb, "#"))
1770 goto err;
1771 if (strbuf_addf(&sb, " '+' Event occurrences may be lost due to branch counter saturated\n"))
1772 goto err;
1773
1774 *str = strbuf_detach(&sb, NULL);
1775
1776 return 0;
1777 err:
1778 strbuf_release(&sb);
1779 return -ENOMEM;
1780 }
1781
1782 /* Assume the branch counter saturates at 3 */
1783 #define ANNOTATION_BR_CNTR_SATURATION 3
1784
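/*
 * Format the branch counter cell for one annotation line: a histogram of
 * abbreviated event names by default, exact values with -v.
 */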
1785 int annotation_br_cntr_entry(char **str, int br_cntr_nr,
1786 u64 *br_cntr, int num_aggr,
1787 struct evsel *evsel)
1788 {
1789 struct evsel *pos = evsel ? evlist__first(evsel->evlist) : NULL;
1790 bool saturated = false;
1791 int i, j, avg, used;
1792 struct strbuf sb;
1793
1794 strbuf_init(&sb, /*hint=*/ 0);
1795 for (i = 0; i < br_cntr_nr; i++) {
1796 used = 0;
1797 avg = ceil((double)(br_cntr[i] & ~ANNOTATION__BR_CNTR_SATURATED_FLAG) /
1798 (double)num_aggr);
1799
1800 /*
1801 * A histogram with the abbr name is displayed by default.
1802 * With -v, the exact branch counter values are displayed.
1803 */
1804 if (verbose) {
1805 evlist__for_each_entry_from(evsel->evlist, pos) {
1806 if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
1807 (pos->br_cntr_idx == i))
1808 break;
1809 }
1810 if (strbuf_addstr(&sb, pos->abbr_name))
1811 goto err;
1812
1813 if (!br_cntr[i]) {
1814 if (strbuf_addstr(&sb, "=-"))
1815 goto err;
1816 } else {
1817 if (strbuf_addf(&sb, "=%d", avg))
1818 goto err;
1819 }
1820 if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG) {
1821 if (strbuf_addch(&sb, '+'))
1822 goto err;
1823 } else {
1824 if (strbuf_addch(&sb, ' '))
1825 goto err;
1826 }
1827
1828 if ((i < br_cntr_nr - 1) && strbuf_addch(&sb, ','))
1829 goto err;
1830 continue;
1831 }
1832
1833 if (strbuf_addch(&sb, '|'))
1834 goto err;
1835
1836 if (!br_cntr[i]) {
1837 if (strbuf_addch(&sb, '-'))
1838 goto err;
1839 used++;
1840 } else {
1841 evlist__for_each_entry_from(evsel->evlist, pos) {
1842 if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
1843 (pos->br_cntr_idx == i))
1844 break;
1845 }
1846 if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG)
1847 saturated = true;
1848
1849 for (j = 0; j < avg; j++, used++) {
1850 /* Print + if the number of logged events > 3 */
1851 if (j >= ANNOTATION_BR_CNTR_SATURATION) {
1852 saturated = true;
1853 break;
1854 }
1855 if (strbuf_addstr(&sb, pos->abbr_name))
1856 goto err;
1857 }
1858
1859 if (saturated) {
1860 if (strbuf_addch(&sb, '+'))
1861 goto err;
1862 used++;
1863 }
1864 pos = list_next_entry(pos, core.node);
1865 }
1866
1867 for (j = used; j < ANNOTATION_BR_CNTR_SATURATION + 1; j++) {
1868 if (strbuf_addch(&sb, ' '))
1869 goto err;
1870 }
1871 }
1872
1873 if (!verbose && strbuf_addch(&sb, br_cntr_nr ? '|' : ' '))
1874 goto err;
1875
1876 *str = strbuf_detach(&sb, NULL);
1877
1878 return 0;
1879 err:
1880 strbuf_release(&sb);
1881 return -ENOMEM;
1882 }
1883
1884 static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
1885 bool first_line, bool current_entry, bool change_color, int width,
1886 void *obj, unsigned int percent_type,
1887 int (*obj__set_color)(void *obj, int color),
1888 void (*obj__set_percent_color)(void *obj, double percent, bool current),
1889 int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
1890 void (*obj__printf)(void *obj, const char *fmt, ...),
1891 void (*obj__write_graph)(void *obj, int graph))
1892
1893 {
1894 double percent_max = annotation_line__max_percent(al, percent_type);
1895 int pcnt_width = annotation__pcnt_width(notes),
1896 cycles_width = annotation__cycles_width(notes);
1897 bool show_title = false;
1898 char bf[256];
1899 int printed;
1900
1901 if (first_line && (al->offset == -1 || percent_max == 0.0)) {
1902 if (notes->branch && al->cycles) {
1903 if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
1904 show_title = true;
1905 } else
1906 show_title = true;
1907 }
1908
1909 if (al->offset != -1 && percent_max != 0.0) {
1910 int i;
1911
1912 for (i = 0; i < al->data_nr; i++) {
1913 double percent;
1914
1915 percent = annotation_data__percent(&al->data[i], percent_type);
1916
1917 obj__set_percent_color(obj, percent, current_entry);
1918 if (symbol_conf.show_total_period) {
1919 obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
1920 } else if (symbol_conf.show_nr_samples) {
1921 obj__printf(obj, "%7" PRIu64 " ",
1922 al->data[i].he.nr_samples);
1923 } else {
1924 obj__printf(obj, "%7.2f ", percent);
1925 }
1926 }
1927 } else {
1928 obj__set_percent_color(obj, 0, current_entry);
1929
1930 if (!show_title)
1931 obj__printf(obj, "%-*s", pcnt_width, " ");
1932 else {
1933 obj__printf(obj, "%-*s", pcnt_width,
1934 symbol_conf.show_total_period ? "Period" :
1935 symbol_conf.show_nr_samples ? "Samples" : "Percent");
1936 }
1937 }
1938
1939 if (notes->branch) {
1940 if (al->cycles && al->cycles->ipc)
1941 obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
1942 else if (!show_title)
1943 obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
1944 else
1945 obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
1946
1947 if (!annotate_opts.show_minmax_cycle) {
1948 if (al->cycles && al->cycles->avg)
1949 obj__printf(obj, "%*" PRIu64 " ",
1950 ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
1951 else if (!show_title)
1952 obj__printf(obj, "%*s",
1953 ANNOTATION__CYCLES_WIDTH, " ");
1954 else
1955 obj__printf(obj, "%*s ",
1956 ANNOTATION__CYCLES_WIDTH - 1,
1957 "Cycle");
1958 } else {
1959 if (al->cycles) {
1960 char str[32];
1961
1962 scnprintf(str, sizeof(str),
1963 "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
1964 al->cycles->avg, al->cycles->min,
1965 al->cycles->max);
1966
1967 obj__printf(obj, "%*s ",
1968 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
1969 str);
1970 } else if (!show_title)
1971 obj__printf(obj, "%*s",
1972 ANNOTATION__MINMAX_CYCLES_WIDTH,
1973 " ");
1974 else
1975 obj__printf(obj, "%*s ",
1976 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
1977 "Cycle(min/max)");
1978 }
1979
1980 if (annotate_opts.show_br_cntr) {
1981 if (show_title) {
1982 obj__printf(obj, "%*s ",
1983 ANNOTATION__BR_CNTR_WIDTH,
1984 "Branch Counter");
1985 } else {
1986 char *buf;
1987
1988 if (!annotation_br_cntr_entry(&buf, al->br_cntr_nr, al->br_cntr,
1989 al->num_aggr, al->evsel)) {
1990 obj__printf(obj, "%*s ", ANNOTATION__BR_CNTR_WIDTH, buf);
1991 free(buf);
1992 }
1993 }
1994 }
1995
1996 if (show_title && !*al->line) {
1997 ipc_coverage_string(bf, sizeof(bf), notes);
1998 obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
1999 }
2000 }
2001
2002 obj__printf(obj, " ");
2003
2004 if (!*al->line)
2005 obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
2006 else if (al->offset == -1) {
2007 if (al->line_nr && annotate_opts.show_linenr)
2008 printed = scnprintf(bf, sizeof(bf), "%-*d ",
2009 notes->src->widths.addr + 1, al->line_nr);
2010 else
2011 printed = scnprintf(bf, sizeof(bf), "%-*s ",
2012 notes->src->widths.addr, " ");
2013 obj__printf(obj, bf);
2014 obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
2015 } else {
2016 u64 addr = al->offset;
2017 int color = -1;
2018
2019 if (!annotate_opts.use_offset)
2020 addr += notes->src->start;
2021
2022 if (!annotate_opts.use_offset) {
2023 printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
2024 } else {
2025 if (al->jump_sources &&
2026 annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
2027 if (annotate_opts.show_nr_jumps) {
2028 int prev;
2029 printed = scnprintf(bf, sizeof(bf), "%*d ",
2030 notes->src->widths.jumps,
2031 al->jump_sources);
2032 prev = obj__set_jumps_percent_color(obj, al->jump_sources,
2033 current_entry);
2034 obj__printf(obj, bf);
2035 obj__set_color(obj, prev);
2036 }
2037 print_addr:
2038 printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
2039 notes->src->widths.target, addr);
2040 } else if (ins__is_call(&disasm_line(al)->ins) &&
2041 annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
2042 goto print_addr;
2043 } else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
2044 goto print_addr;
2045 } else {
2046 printed = scnprintf(bf, sizeof(bf), "%-*s ",
2047 notes->src->widths.addr, " ");
2048 }
2049 }
2050
2051 if (change_color)
2052 color = obj__set_color(obj, HE_COLORSET_ADDR);
2053 obj__printf(obj, bf);
2054 if (change_color)
2055 obj__set_color(obj, color);
2056
2057 disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);
2058
2059 obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
2060 }
2061
2062 }
2063
2064 void annotation_line__write(struct annotation_line *al, struct annotation *notes,
2065 struct annotation_write_ops *wops)
2066 {
2067 __annotation_line__write(al, notes, wops->first_line, wops->current_entry,
2068 wops->change_color, wops->width, wops->obj,
2069 annotate_opts.percent_type,
2070 wops->set_color, wops->set_percent_color,
2071 wops->set_jumps_percent_color, wops->printf,
2072 wops->write_graph);
2073 }
2074
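/*
 * Like symbol__annotate(), but also computes per-line percentages, jump
 * targets, IPC/cycle information and the column widths used for display.
 */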
2075 int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
2076 struct arch **parch)
2077 {
2078 struct symbol *sym = ms->sym;
2079 struct annotation *notes = symbol__annotation(sym);
2080 size_t size = symbol__size(sym);
2081 int err;
2082
2083 err = symbol__annotate(ms, evsel, parch);
2084 if (err)
2085 return err;
2086
2087 symbol__calc_percent(sym, evsel);
2088
2089 annotation__set_index(notes);
2090 annotation__mark_jump_targets(notes, sym);
2091
2092 err = annotation__compute_ipc(notes, size, evsel);
2093 if (err)
2094 return err;
2095
2096 annotation__init_column_widths(notes, sym);
2097 annotation__update_column_widths(notes);
2098 sym->annotate2 = 1;
2099
2100 return 0;
2101 }
2102
2103 const char * const perf_disassembler__strs[] = {
2104 [PERF_DISASM_UNKNOWN] = "unknown",
2105 [PERF_DISASM_LLVM] = "llvm",
2106 [PERF_DISASM_CAPSTONE] = "capstone",
2107 [PERF_DISASM_OBJDUMP] = "objdump",
2108 };
2109
2110
2111 static void annotation_options__add_disassembler(struct annotation_options *options,
2112 enum perf_disassembler dis)
2113 {
2114 for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers); i++) {
2115 if (options->disassemblers[i] == dis) {
2116 /* The disassembler is already present, so don't add it again. */
2117 return;
2118 }
2119 if (options->disassemblers[i] == PERF_DISASM_UNKNOWN) {
2120 /* Found a free slot. */
2121 options->disassemblers[i] = dis;
2122 return;
2123 }
2124 }
2125 pr_err("Failed to add disassembler %d\n", dis);
2126 }
2127
2128 static int annotation_options__add_disassemblers_str(struct annotation_options *options,
2129 const char *str)
2130 {
2131 while (str && *str != '\0') {
2132 const char *comma = strchr(str, ',');
2133 int len = comma ? comma - str : (int)strlen(str);
2134 bool match = false;
2135
2136 for (u8 i = 0; i < ARRAY_SIZE(perf_disassembler__strs); i++) {
2137 const char *dis_str = perf_disassembler__strs[i];
2138
2139 if (len == (int)strlen(dis_str) && !strncmp(str, dis_str, len)) {
2140 annotation_options__add_disassembler(options, i);
2141 match = true;
2142 break;
2143 }
2144 }
2145 if (!match) {
2146 pr_err("Invalid disassembler '%.*s'\n", len, str);
2147 return -1;
2148 }
2149 str = comma ? comma + 1 : NULL;
2150 }
2151 return 0;
2152 }
2153
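/*
 * Handle the "annotate.*" section of perfconfig. An illustrative
 * ~/.perfconfig snippet (values are examples only):
 *
 *	[annotate]
 *		hide_src_code = true
 *		disassemblers = llvm,objdump
 */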
2154 static int annotation__config(const char *var, const char *value, void *data)
2155 {
2156 struct annotation_options *opt = data;
2157
2158 if (!strstarts(var, "annotate."))
2159 return 0;
2160
2161 if (!strcmp(var, "annotate.offset_level")) {
2162 perf_config_u8(&opt->offset_level, "offset_level", value);
2163
2164 if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
2165 opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
2166 else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
2167 opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
2168 } else if (!strcmp(var, "annotate.disassemblers")) {
2169 int err = annotation_options__add_disassemblers_str(opt, value);
2170
2171 if (err)
2172 return err;
2173 } else if (!strcmp(var, "annotate.hide_src_code")) {
2174 opt->hide_src_code = perf_config_bool("hide_src_code", value);
2175 } else if (!strcmp(var, "annotate.jump_arrows")) {
2176 opt->jump_arrows = perf_config_bool("jump_arrows", value);
2177 } else if (!strcmp(var, "annotate.show_linenr")) {
2178 opt->show_linenr = perf_config_bool("show_linenr", value);
2179 } else if (!strcmp(var, "annotate.show_nr_jumps")) {
2180 opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
2181 } else if (!strcmp(var, "annotate.show_nr_samples")) {
2182 symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
2183 value);
2184 } else if (!strcmp(var, "annotate.show_total_period")) {
2185 symbol_conf.show_total_period = perf_config_bool("show_total_period",
2186 value);
2187 } else if (!strcmp(var, "annotate.use_offset")) {
2188 opt->use_offset = perf_config_bool("use_offset", value);
2189 } else if (!strcmp(var, "annotate.disassembler_style")) {
2190 opt->disassembler_style = strdup(value);
2191 if (!opt->disassembler_style) {
2192 pr_err("Not enough memory for annotate.disassembler_style\n");
2193 return -1;
2194 }
2195 } else if (!strcmp(var, "annotate.objdump")) {
2196 opt->objdump_path = strdup(value);
2197 if (!opt->objdump_path) {
2198 pr_err("Not enough memory for annotate.objdump\n");
2199 return -1;
2200 }
2201 } else if (!strcmp(var, "annotate.addr2line")) {
2202 symbol_conf.addr2line_path = strdup(value);
2203 if (!symbol_conf.addr2line_path) {
2204 pr_err("Not enough memory for annotate.addr2line\n");
2205 return -1;
2206 }
2207 } else if (!strcmp(var, "annotate.demangle")) {
2208 symbol_conf.demangle = perf_config_bool("demangle", value);
2209 } else if (!strcmp(var, "annotate.demangle_kernel")) {
2210 symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
2211 } else {
2212 pr_debug("%s variable unknown, ignoring...", var);
2213 }
2214
2215 return 0;
2216 }
2217
2218 void annotation_options__init(void)
2219 {
2220 struct annotation_options *opt = &annotate_opts;
2221
2222 memset(opt, 0, sizeof(*opt));
2223
2224 /* Default values. */
2225 opt->use_offset = true;
2226 opt->jump_arrows = true;
2227 opt->annotate_src = true;
2228 opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
2229 opt->percent_type = PERCENT_PERIOD_LOCAL;
2230 }
2231
2232 void annotation_options__exit(void)
2233 {
2234 zfree(&annotate_opts.disassembler_style);
2235 zfree(&annotate_opts.objdump_path);
2236 }
2237
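/*
 * If no disassemblers were configured, prefer LLVM, then capstone when
 * they are built in, and always fall back to objdump last.
 */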
2238 static void annotation_options__default_init_disassemblers(struct annotation_options *options)
2239 {
2240 if (options->disassemblers[0] != PERF_DISASM_UNKNOWN) {
2241 /* Already initialized. */
2242 return;
2243 }
2244 #ifdef HAVE_LIBLLVM_SUPPORT
2245 annotation_options__add_disassembler(options, PERF_DISASM_LLVM);
2246 #endif
2247 #ifdef HAVE_LIBCAPSTONE_SUPPORT
2248 annotation_options__add_disassembler(options, PERF_DISASM_CAPSTONE);
2249 #endif
2250 annotation_options__add_disassembler(options, PERF_DISASM_OBJDUMP);
2251 }
2252
2253 void annotation_config__init(void)
2254 {
2255 perf_config(annotation__config, &annotate_opts);
2256 annotation_options__default_init_disassemblers(&annotate_opts);
2257 }
2258
2259 static unsigned int parse_percent_type(char *str1, char *str2)
2260 {
2261 unsigned int type = (unsigned int) -1;
2262
2263 if (!strcmp("period", str1)) {
2264 if (!strcmp("local", str2))
2265 type = PERCENT_PERIOD_LOCAL;
2266 else if (!strcmp("global", str2))
2267 type = PERCENT_PERIOD_GLOBAL;
2268 }
2269
2270 if (!strcmp("hits", str1)) {
2271 if (!strcmp("local", str2))
2272 type = PERCENT_HITS_LOCAL;
2273 else if (!strcmp("global", str2))
2274 type = PERCENT_HITS_GLOBAL;
2275 }
2276
2277 return type;
2278 }
2279
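/*
 * Parse a "<a>-<b>" percent type string (as used by --percent-type).
 * The two parts are accepted in either order, e.g. both "local-period"
 * and "period-local" select PERCENT_PERIOD_LOCAL.
 */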
2280 int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
2281 int unset __maybe_unused)
2282 {
2283 unsigned int type;
2284 char *str1, *str2;
2285 int err = -1;
2286
2287 str1 = strdup(_str);
2288 if (!str1)
2289 return -ENOMEM;
2290
2291 str2 = strchr(str1, '-');
2292 if (!str2)
2293 goto out;
2294
2295 *str2++ = 0;
2296
2297 type = parse_percent_type(str1, str2);
2298 if (type == (unsigned int) -1)
2299 type = parse_percent_type(str2, str1);
2300 if (type != (unsigned int) -1) {
2301 annotate_opts.percent_type = type;
2302 err = 0;
2303 }
2304
2305 out:
2306 free(str1);
2307 return err;
2308 }
2309
2310 int annotate_check_args(void)
2311 {
2312 struct annotation_options *args = &annotate_opts;
2313
2314 if (args->prefix_strip && !args->prefix) {
2315 pr_err("--prefix-strip requires --prefix\n");
2316 return -1;
2317 }
2318 return 0;
2319 }
2320
2321 /*
2322 * Get register number and access offset from the given instruction.
2323 * It assumes the AT&T x86 asm format like OFFSET(REG). The format may
2324 * need to be revisited when other architectures are handled.
2325 * Fills the register and offset fields of @op_loc and returns 0 on success.
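 *
 * For example (x86 AT&T syntax, hypothetical operands): "-0x10(%rbp)"
 * yields offset = -0x10 and reg1 = the DWARF register number of %rbp;
 * "%gs:0x18(%rbx)" additionally records the %gs segment before the
 * offset and register are parsed as above.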
2326 */
2327 static int extract_reg_offset(struct arch *arch, const char *str,
2328 struct annotated_op_loc *op_loc)
2329 {
2330 char *p;
2331 char *regname;
2332
2333 if (arch->objdump.register_char == 0)
2334 return -1;
2335
2336 /*
2337 * The string should start with the offset, but the asm may omit a zero
2338 * offset, so 0(%rax) is the same as (%rax).
2339 *
2340 * However, it can also start with a segment selector register like
2341 * %gs:0x18(%rbx). In that case, skip that part.
2342 */
2343 if (*str == arch->objdump.register_char) {
2344 if (arch__is(arch, "x86")) {
2345 /* FIXME: Handle other segment registers */
2346 if (!strncmp(str, "%gs:", 4))
2347 op_loc->segment = INSN_SEG_X86_GS;
2348 }
2349
2350 while (*str && !isdigit(*str) &&
2351 *str != arch->objdump.memory_ref_char)
2352 str++;
2353 }
2354
2355 op_loc->offset = strtol(str, &p, 0);
2356
2357 p = strchr(p, arch->objdump.register_char);
2358 if (p == NULL)
2359 return -1;
2360
2361 regname = strdup(p);
2362 if (regname == NULL)
2363 return -1;
2364
2365 op_loc->reg1 = get_dwarf_regnum(regname, arch->e_machine, arch->e_flags);
2366 free(regname);
2367
2368 /* Get the second register */
2369 if (op_loc->multi_regs) {
2370 p = strchr(p + 1, arch->objdump.register_char);
2371 if (p == NULL)
2372 return -1;
2373
2374 regname = strdup(p);
2375 if (regname == NULL)
2376 return -1;
2377
2378 op_loc->reg2 = get_dwarf_regnum(regname, arch->e_machine, arch->e_flags);
2379 free(regname);
2380 }
2381 return 0;
2382 }
2383
2384 /**
2385 * annotate_get_insn_location - Get location of instruction
2386 * @arch: the architecture info
2387 * @dl: the target instruction
2388 * @loc: a buffer to save the data
2389 *
2390 * Get detailed location info (register and offset) in the instruction.
2391 * It needs both source and target operand and whether it accesses a
2392 * memory location. The offset field is meaningful only when the
2393 * corresponding mem flag is set. The reg2 field is meaningful only
2394 * when multi_regs flag is set.
2395 *
2396 * Some examples on x86:
2397 *
2398 * mov (%rax), %rcx # src_reg1 = rax, src_mem = 1, src_offset = 0
2399 * # dst_reg1 = rcx, dst_mem = 0
2400 *
2401 * mov 0x18, %r8 # src_reg1 = -1, src_mem = 0
2402 * # dst_reg1 = r8, dst_mem = 0
2403 *
2404 * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
2405 * # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
2406 * # dst_multi_regs = 1, dst_offset = 8
2407 */
2408 int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
2409 struct annotated_insn_loc *loc)
2410 {
2411 struct ins_operands *ops;
2412 struct annotated_op_loc *op_loc;
2413 int i;
2414
2415 if (ins__is_lock(&dl->ins))
2416 ops = dl->ops.locked.ops;
2417 else
2418 ops = &dl->ops;
2419
2420 if (ops == NULL)
2421 return -1;
2422
2423 memset(loc, 0, sizeof(*loc));
2424
2425 for_each_insn_op_loc(loc, i, op_loc) {
2426 const char *insn_str = ops->source.raw;
2427 bool multi_regs = ops->source.multi_regs;
2428 bool mem_ref = ops->source.mem_ref;
2429
2430 if (i == INSN_OP_TARGET) {
2431 insn_str = ops->target.raw;
2432 multi_regs = ops->target.multi_regs;
2433 mem_ref = ops->target.mem_ref;
2434 }
2435
2436 /* Invalidate the register by default */
2437 op_loc->reg1 = -1;
2438 op_loc->reg2 = -1;
2439
2440 if (insn_str == NULL) {
2441 if (!arch__is(arch, "powerpc"))
2442 continue;
2443 }
2444
2445 /*
2446 * For powerpc, call get_powerpc_regs(), which extracts the fields
2447 * required for op_loc (i.e. reg1, reg2 and offset) from the
2448 * raw instruction.
2449 */
2450 if (arch__is(arch, "powerpc")) {
2451 op_loc->mem_ref = mem_ref;
2452 op_loc->multi_regs = multi_regs;
2453 get_powerpc_regs(dl->raw.raw_insn, !i, op_loc);
2454 } else if (strchr(insn_str, arch->objdump.memory_ref_char)) {
2455 op_loc->mem_ref = true;
2456 op_loc->multi_regs = multi_regs;
2457 extract_reg_offset(arch, insn_str, op_loc);
2458 } else {
2459 char *s, *p = NULL;
2460
2461 if (arch__is(arch, "x86")) {
2462 /* FIXME: Handle other segment registers */
2463 if (!strncmp(insn_str, "%gs:", 4)) {
2464 op_loc->segment = INSN_SEG_X86_GS;
2465 op_loc->offset = strtol(insn_str + 4,
2466 &p, 0);
2467 if (p && p != insn_str + 4)
2468 op_loc->imm = true;
2469 continue;
2470 }
2471 }
2472
2473 s = strdup(insn_str);
2474 if (s == NULL)
2475 return -1;
2476
2477 if (*s == arch->objdump.register_char)
2478 op_loc->reg1 = get_dwarf_regnum(s, arch->e_machine, arch->e_flags);
2479 else if (*s == arch->objdump.imm_char) {
2480 op_loc->offset = strtol(s + 1, &p, 0);
2481 if (p && p != s + 1)
2482 op_loc->imm = true;
2483 }
2484 free(s);
2485 }
2486 }
2487
2488 return 0;
2489 }
2490
2491 static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
2492 bool allow_update)
2493 {
2494 struct disasm_line *dl;
2495 struct annotation *notes;
2496
2497 notes = symbol__annotation(sym);
2498
2499 list_for_each_entry(dl, &notes->src->source, al.node) {
2500 if (dl->al.offset == -1)
2501 continue;
2502
2503 if (sym->start + dl->al.offset == ip) {
2504 /*
2505 * llvm-objdump places "lock" on a separate line and
2506 * in that case, we want to get the next line.
2507 */
2508 if (ins__is_lock(&dl->ins) &&
2509 *dl->ops.raw == '\0' && allow_update) {
2510 ip++;
2511 continue;
2512 }
2513 return dl;
2514 }
2515 }
2516 return NULL;
2517 }
2518
2519 static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
2520 const char *name)
2521 {
2522 struct annotated_item_stat *istat;
2523
2524 list_for_each_entry(istat, head, list) {
2525 if (!strcmp(istat->name, name))
2526 return istat;
2527 }
2528
2529 istat = zalloc(sizeof(*istat));
2530 if (istat == NULL)
2531 return NULL;
2532
2533 istat->name = strdup(name);
2534 if ((istat->name == NULL) || (!strlen(istat->name))) {
2535 free(istat);
2536 return NULL;
2537 }
2538
2539 list_add_tail(&istat->list, head);
2540 return istat;
2541 }
2542
2543 static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
2544 {
2545 if (arch__is(arch, "x86")) {
2546 if (!strncmp(dl->ins.name, "push", 4) ||
2547 !strncmp(dl->ins.name, "pop", 3) ||
2548 !strncmp(dl->ins.name, "call", 4) ||
2549 !strncmp(dl->ins.name, "ret", 3))
2550 return true;
2551 }
2552
2553 return false;
2554 }
2555
2556 static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
2557 {
2558 /* On x86_64, %gs:40 is used for the stack canary */
2559 if (arch__is(arch, "x86")) {
2560 if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
2561 loc->offset == 40)
2562 return true;
2563 }
2564
2565 return false;
2566 }
2567
2568 static struct disasm_line *
2569 annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
2570 {
2571 struct list_head *sources = &notes->src->source;
2572 struct disasm_line *prev;
2573
2574 if (curr == list_first_entry(sources, struct disasm_line, al.node))
2575 return NULL;
2576
2577 prev = list_prev_entry(curr, al.node);
2578 while (prev->al.offset == -1 &&
2579 prev != list_first_entry(sources, struct disasm_line, al.node))
2580 prev = list_prev_entry(prev, al.node);
2581
2582 if (prev->al.offset == -1)
2583 return NULL;
2584
2585 return prev;
2586 }
2587
2588 static struct disasm_line *
2589 annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
2590 {
2591 struct list_head *sources = &notes->src->source;
2592 struct disasm_line *next;
2593
2594 if (curr == list_last_entry(sources, struct disasm_line, al.node))
2595 return NULL;
2596
2597 next = list_next_entry(curr, al.node);
2598 while (next->al.offset == -1 &&
2599 next != list_last_entry(sources, struct disasm_line, al.node))
2600 next = list_next_entry(next, al.node);
2601
2602 if (next->al.offset == -1)
2603 return NULL;
2604
2605 return next;
2606 }
2607
2608 u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
2609 struct disasm_line *dl)
2610 {
2611 struct annotation *notes;
2612 struct disasm_line *next;
2613 u64 addr;
2614
2615 notes = symbol__annotation(ms->sym);
2616 /*
2617 * PC-relative addressing starts from the next instruction address,
2618 * but the IP is for the current instruction. Since disasm_line
2619 * doesn't have the instruction size, calculate it using the next
2620 * disasm_line. If it's the last one, we can use symbol's end
2621 * address directly.
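 *
 * For example (hypothetical offsets): if this instruction is at offset
 * 0x10 and the next one is at 0x17, an operand of 0x1234(%rip) resolves
 * to ip + 7 + 0x1234, which is then converted to the objdump address space.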
2622 */
2623 next = annotation__next_asm_line(notes, dl);
2624 if (next == NULL)
2625 addr = ms->sym->end + offset;
2626 else
2627 addr = ip + (next->al.offset - dl->al.offset) + offset;
2628
2629 return map__rip_2objdump(ms->map, addr);
2630 }
2631
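/*
 * Cache the most recently used dso and its debuginfo so that repeated
 * samples from the same dso do not reopen the debug info every time.
 */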
2632 static struct debuginfo_cache {
2633 struct dso *dso;
2634 struct debuginfo *dbg;
2635 } di_cache;
2636
2637 void debuginfo_cache__delete(void)
2638 {
2639 dso__put(di_cache.dso);
2640 di_cache.dso = NULL;
2641
2642 debuginfo__delete(di_cache.dbg);
2643 di_cache.dbg = NULL;
2644 }
2645
2646 /**
2647 * hist_entry__get_data_type - find data type for given hist entry
2648 * @he: hist entry
2649 *
2650 * This function first annotates the instruction at @he->ip and extracts
2651 * register and offset info from it. Then it searches the DWARF debug
2652 * info to get a variable and type information using the address, register,
2653 * and offset.
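 *
 * For example (hypothetical sample): for "mov 0x10(%rax),%rbx" it would
 * look up which variable %rax points to at @he->ip, return that variable's
 * type, and record the accessed offset within the type in @he->mem_type_off.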
2654 */
2655 struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
2656 {
2657 struct map_symbol *ms = &he->ms;
2658 struct evsel *evsel = hists_to_evsel(he->hists);
2659 struct arch *arch;
2660 struct disasm_line *dl;
2661 struct annotated_insn_loc loc;
2662 struct annotated_op_loc *op_loc;
2663 struct annotated_data_type *mem_type;
2664 struct annotated_item_stat *istat;
2665 u64 ip = he->ip;
2666 int i;
2667
2668 ann_data_stat.total++;
2669
2670 if (ms->map == NULL || ms->sym == NULL) {
2671 ann_data_stat.no_sym++;
2672 return NULL;
2673 }
2674
2675 if (!symbol_conf.init_annotation) {
2676 ann_data_stat.no_sym++;
2677 return NULL;
2678 }
2679
2680 /*
2681 * di_cache holds a pair of values, but code below assumes
2682 * di_cache.dso can be compared/updated and di_cache.dbg can be
2683 * read/updated independently from each other. That assumption only
2684 * holds in single threaded code.
2685 */
2686 assert(perf_singlethreaded);
2687
2688 if (map__dso(ms->map) != di_cache.dso) {
2689 dso__put(di_cache.dso);
2690 di_cache.dso = dso__get(map__dso(ms->map));
2691
2692 debuginfo__delete(di_cache.dbg);
2693 di_cache.dbg = debuginfo__new(dso__long_name(di_cache.dso));
2694 }
2695
2696 if (di_cache.dbg == NULL) {
2697 ann_data_stat.no_dbginfo++;
2698 return NULL;
2699 }
2700
2701 /* Make sure it has the disasm of the function */
2702 if (symbol__annotate(ms, evsel, &arch) < 0) {
2703 ann_data_stat.no_insn++;
2704 return NULL;
2705 }
2706
2707 /*
2708 * Get a disasm to extract the location from the insn.
2709 * This is too slow...
2710 */
2711 dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
2712 if (dl == NULL) {
2713 ann_data_stat.no_insn++;
2714 return NULL;
2715 }
2716
2717 retry:
2718 istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
2719 if (istat == NULL) {
2720 ann_data_stat.no_insn++;
2721 return NULL;
2722 }
2723
2724 if (annotate_get_insn_location(arch, dl, &loc) < 0) {
2725 ann_data_stat.no_insn_ops++;
2726 istat->bad++;
2727 return NULL;
2728 }
2729
2730 if (is_stack_operation(arch, dl)) {
2731 istat->good++;
2732 he->mem_type_off = 0;
2733 return &stackop_type;
2734 }
2735
2736 for_each_insn_op_loc(&loc, i, op_loc) {
2737 struct data_loc_info dloc = {
2738 .arch = arch,
2739 .thread = he->thread,
2740 .ms = ms,
2741 /* Recalculate IP for LOCK prefix or insn fusion */
2742 .ip = ms->sym->start + dl->al.offset,
2743 .cpumode = he->cpumode,
2744 .op = op_loc,
2745 .di = di_cache.dbg,
2746 };
2747
2748 if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
2749 continue;
2750
2751 /* Recalculate IP because of LOCK prefix or insn fusion */
2752 ip = ms->sym->start + dl->al.offset;
2753
2754 /* PC-relative addressing */
2755 if (op_loc->reg1 == DWARF_REG_PC) {
2756 dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
2757 op_loc->offset, dl);
2758 }
2759
2760 /* A per-CPU variable access in the kernel - pretend PC-relative addressing */
2761 if (dso__kernel(map__dso(ms->map)) && arch__is(arch, "x86") &&
2762 op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
2763 dloc.var_addr = op_loc->offset;
2764 op_loc->reg1 = DWARF_REG_PC;
2765 }
2766
2767 mem_type = find_data_type(&dloc);
2768
2769 if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
2770 istat->good++;
2771 he->mem_type_off = 0;
2772 return &canary_type;
2773 }
2774
2775 if (mem_type)
2776 istat->good++;
2777 else
2778 istat->bad++;
2779
2780 if (symbol_conf.annotate_data_sample) {
2781 annotated_data_type__update_samples(mem_type, evsel,
2782 dloc.type_offset,
2783 he->stat.nr_events,
2784 he->stat.period);
2785 }
2786 he->mem_type_off = dloc.type_offset;
2787 return mem_type;
2788 }
2789
2790 /*
2791 * Some instructions can be fused and the actual memory access may come
2792 * from the previous instruction.
2793 */
2794 if (dl->al.offset > 0) {
2795 struct annotation *notes;
2796 struct disasm_line *prev_dl;
2797
2798 notes = symbol__annotation(ms->sym);
2799 prev_dl = annotation__prev_asm_line(notes, dl);
2800
2801 if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
2802 dl = prev_dl;
2803 goto retry;
2804 }
2805 }
2806
2807 ann_data_stat.no_mem_ops++;
2808 istat->bad++;
2809 return NULL;
2810 }
2811
2812 /* Basic block traversal (BFS) data structure */
2813 struct basic_block_data {
2814 struct list_head queue;
2815 struct list_head visited;
2816 };
2817
2818 /*
2819 * During the traversal, it needs to know the parent block that the current
2820 * block started from. Note that a single basic block can be the parent of
2821 * two child basic blocks (in the case of a conditional jump).
2822 */
2823 struct basic_block_link {
2824 struct list_head node;
2825 struct basic_block_link *parent;
2826 struct annotated_basic_block *bb;
2827 };
2828
2829 /* Check whether any basic block in the list already contains the offset */
2830 static bool basic_block_has_offset(struct list_head *head, s64 offset)
2831 {
2832 struct basic_block_link *link;
2833
2834 list_for_each_entry(link, head, node) {
2835 s64 begin_offset = link->bb->begin->al.offset;
2836 s64 end_offset = link->bb->end->al.offset;
2837
2838 if (begin_offset <= offset && offset <= end_offset)
2839 return true;
2840 }
2841 return false;
2842 }
2843
2844 static bool is_new_basic_block(struct basic_block_data *bb_data,
2845 struct disasm_line *dl)
2846 {
2847 s64 offset = dl->al.offset;
2848
2849 if (basic_block_has_offset(&bb_data->visited, offset))
2850 return false;
2851 if (basic_block_has_offset(&bb_data->queue, offset))
2852 return false;
2853 return true;
2854 }
2855
2856 /* Add a basic block starting from dl and link it to the parent */
2857 static int add_basic_block(struct basic_block_data *bb_data,
2858 struct basic_block_link *parent,
2859 struct disasm_line *dl)
2860 {
2861 struct annotated_basic_block *bb;
2862 struct basic_block_link *link;
2863
2864 if (dl == NULL)
2865 return -1;
2866
2867 if (!is_new_basic_block(bb_data, dl))
2868 return 0;
2869
2870 bb = zalloc(sizeof(*bb));
2871 if (bb == NULL)
2872 return -1;
2873
2874 bb->begin = dl;
2875 bb->end = dl;
2876 INIT_LIST_HEAD(&bb->list);
2877
2878 link = malloc(sizeof(*link));
2879 if (link == NULL) {
2880 free(bb);
2881 return -1;
2882 }
2883
2884 link->bb = bb;
2885 link->parent = parent;
2886 list_add_tail(&link->node, &bb_data->queue);
2887 return 0;
2888 }
2889
2890 /* Returns true when it finds the target in the current basic block */
2891 static bool process_basic_block(struct basic_block_data *bb_data,
2892 struct basic_block_link *link,
2893 struct symbol *sym, u64 target)
2894 {
2895 struct disasm_line *dl, *next_dl, *last_dl;
2896 struct annotation *notes = symbol__annotation(sym);
2897 bool found = false;
2898
2899 dl = link->bb->begin;
2900 /* Check if it's already visited */
2901 if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
2902 return false;
2903
2904 last_dl = list_last_entry(&notes->src->source,
2905 struct disasm_line, al.node);
2906 if (last_dl->al.offset == -1)
2907 last_dl = annotation__prev_asm_line(notes, last_dl);
2908
2909 if (last_dl == NULL)
2910 return false;
2911
2912 list_for_each_entry_from(dl, &notes->src->source, al.node) {
2913 /* Skip comment or debug info line */
2914 if (dl->al.offset == -1)
2915 continue;
2916 /* Found the target instruction */
2917 if (sym->start + dl->al.offset == target) {
2918 found = true;
2919 break;
2920 }
2921 /* End of the function, finish the block */
2922 if (dl == last_dl)
2923 break;
2924 /* 'return' instruction finishes the block */
2925 if (ins__is_ret(&dl->ins))
2926 break;
2927 /* normal instructions are part of the basic block */
2928 if (!ins__is_jump(&dl->ins))
2929 continue;
2930 /* jump to a different function, tail call or return */
2931 if (dl->ops.target.outside)
2932 break;
2933 /* jump instruction creates new basic block(s) */
2934 next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
2935 /*allow_update=*/false);
2936 if (next_dl)
2937 add_basic_block(bb_data, link, next_dl);
2938
2939 /*
2940 * FIXME: determine conditional jumps properly.
2941 * Conditional jumps create another basic block with the
2942 * next disasm line.
2943 */
2944 if (!strstr(dl->ins.name, "jmp")) {
2945 next_dl = annotation__next_asm_line(notes, dl);
2946 if (next_dl)
2947 add_basic_block(bb_data, link, next_dl);
2948 }
2949 break;
2950
2951 }
2952 link->bb->end = dl;
2953 return found;
2954 }
2955
2956 /*
2957 * Once the target basic block is found, build a proper linked list of basic
2958 * blocks by following the parent links back to the start.
2959 */
2960 static void link_found_basic_blocks(struct basic_block_link *link,
2961 struct list_head *head)
2962 {
2963 while (link) {
2964 struct basic_block_link *parent = link->parent;
2965
2966 list_move(&link->bb->list, head);
2967 list_del(&link->node);
2968 free(link);
2969
2970 link = parent;
2971 }
2972 }
2973
2974 static void delete_basic_blocks(struct basic_block_data *bb_data)
2975 {
2976 struct basic_block_link *link, *tmp;
2977
2978 list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
2979 list_del(&link->node);
2980 zfree(&link->bb);
2981 free(link);
2982 }
2983
2984 list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
2985 list_del(&link->node);
2986 zfree(&link->bb);
2987 free(link);
2988 }
2989 }
2990
2991 /**
2992 * annotate_get_basic_blocks - Get basic blocks for given address range
2993 * @sym: symbol to annotate
2994 * @src: source address
2995 * @dst: destination address
2996 * @head: list head to save basic blocks
2997 *
2998 * This function traverses disasm_lines from @src to @dst and saves them in a
2999 * list of annotated_basic_block on @head. It uses BFS to find the shortest
3000 * path between the two. The basic_block_link maintains parent links so
3001 * that it can build the list of blocks from the start.
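 *
 * A minimal usage sketch (hypothetical addresses; use() is a placeholder):
 *
 *	struct annotated_basic_block *bb;
 *	LIST_HEAD(blocks);
 *
 *	if (annotate_get_basic_blocks(sym, sym->start + 0x10,
 *				      sym->start + 0x80, &blocks) == 0) {
 *		list_for_each_entry(bb, &blocks, list)
 *			use(bb->begin, bb->end);
 *	}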
3002 */
3003 int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
3004 struct list_head *head)
3005 {
3006 struct basic_block_data bb_data = {
3007 .queue = LIST_HEAD_INIT(bb_data.queue),
3008 .visited = LIST_HEAD_INIT(bb_data.visited),
3009 };
3010 struct basic_block_link *link;
3011 struct disasm_line *dl;
3012 int ret = -1;
3013
3014 dl = find_disasm_line(sym, src, /*allow_update=*/false);
3015 if (dl == NULL)
3016 return -1;
3017
3018 if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0)
3019 return -1;
3020
3021 /* Find shortest path from src to dst using BFS */
3022 while (!list_empty(&bb_data.queue)) {
3023 link = list_first_entry(&bb_data.queue, struct basic_block_link, node);
3024
3025 if (process_basic_block(&bb_data, link, sym, dst)) {
3026 link_found_basic_blocks(link, head);
3027 ret = 0;
3028 break;
3029 }
3030 list_move(&link->node, &bb_data.visited);
3031 }
3032 delete_basic_blocks(&bb_data);
3033 return ret;
3034 }
3035