1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4 *
5 * Parts came from builtin-annotate.c, see those files for further
6 * copyright notes.
7 */
8
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <libgen.h>
12 #include <stdlib.h>
13 #include "util.h" // hex_width()
14 #include "ui/ui.h"
15 #include "sort.h"
16 #include "build-id.h"
17 #include "color.h"
18 #include "config.h"
19 #include "disasm.h"
20 #include "dso.h"
21 #include "env.h"
22 #include "map.h"
23 #include "maps.h"
24 #include "symbol.h"
25 #include "srcline.h"
26 #include "units.h"
27 #include "debug.h"
28 #include "debuginfo.h"
29 #include "annotate.h"
30 #include "annotate-data.h"
31 #include "evsel.h"
32 #include "evlist.h"
33 #include "bpf-event.h"
34 #include "bpf-utils.h"
35 #include "block-range.h"
36 #include "string2.h"
37 #include "dwarf-regs.h"
38 #include "util/event.h"
39 #include "util/sharded_mutex.h"
40 #include "arch/common.h"
41 #include "namespaces.h"
42 #include "thread.h"
43 #include "hashmap.h"
44 #include "strbuf.h"
45 #include <regex.h>
46 #include <linux/bitops.h>
47 #include <linux/kernel.h>
48 #include <linux/string.h>
49 #include <linux/zalloc.h>
50 #include <subcmd/parse-options.h>
51 #include <subcmd/run-command.h>
52 #include <math.h>
53
54 /* FIXME: For the HE_COLORSET */
55 #include "ui/browser.h"
56
57 /*
58 * FIXME: Using the same values as slang.h,
59 * but that header may not be available everywhere
60 */
61 #define LARROW_CHAR ((unsigned char)',')
62 #define RARROW_CHAR ((unsigned char)'+')
63 #define DARROW_CHAR ((unsigned char)'.')
64 #define UARROW_CHAR ((unsigned char)'-')
65
66 #include <linux/ctype.h>
67
68 /* global annotation options */
69 struct annotation_options annotate_opts;
70
71 /* Data type collection debug statistics */
72 struct annotated_data_stat ann_data_stat;
73 LIST_HEAD(ann_insn_stat);
74
75 /* Pseudo data types */
76 struct annotated_data_type stackop_type = {
77 .self = {
78 .type_name = (char *)"(stack operation)",
79 .children = LIST_HEAD_INIT(stackop_type.self.children),
80 },
81 };
82
83 struct annotated_data_type canary_type = {
84 .self = {
85 .type_name = (char *)"(stack canary)",
86 .children = LIST_HEAD_INIT(canary_type.self.children),
87 },
88 };
89
90 /* symbol histogram: key = offset << 16 | evsel->core.idx */
static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
92 {
93 return (key >> 16) + (key & 0xffff);
94 }
95
static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
97 {
98 return key1 == key2;
99 }
100
static struct annotated_source *annotated_source__new(void)
102 {
103 struct annotated_source *src = zalloc(sizeof(*src));
104
105 if (src != NULL)
106 INIT_LIST_HEAD(&src->source);
107
108 return src;
109 }
110
static __maybe_unused void annotated_source__delete(struct annotated_source *src)
112 {
113 struct hashmap_entry *cur;
114 size_t bkt;
115
116 if (src == NULL)
117 return;
118
119 if (src->samples) {
120 hashmap__for_each_entry(src->samples, cur, bkt)
121 zfree(&cur->pvalue);
122 hashmap__free(src->samples);
123 }
124 zfree(&src->histograms);
125 free(src);
126 }
127
static int annotated_source__alloc_histograms(struct annotated_source *src,
					      int nr_hists)
130 {
131 src->nr_histograms = nr_hists;
132 src->histograms = calloc(nr_hists, sizeof(*src->histograms));
133
134 if (src->histograms == NULL)
135 return -1;
136
137 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
138 if (src->samples == NULL)
139 zfree(&src->histograms);
140
141 return src->histograms ? 0 : -1;
142 }
143
void symbol__annotate_zero_histograms(struct symbol *sym)
145 {
146 struct annotation *notes = symbol__annotation(sym);
147
148 annotation__lock(notes);
149 if (notes->src != NULL) {
150 memset(notes->src->histograms, 0,
151 notes->src->nr_histograms * sizeof(*notes->src->histograms));
152 hashmap__clear(notes->src->samples);
153 }
154 if (notes->branch && notes->branch->cycles_hist) {
155 memset(notes->branch->cycles_hist, 0,
156 symbol__size(sym) * sizeof(struct cyc_hist));
157 }
158 annotation__unlock(notes);
159 }
160
static int __symbol__account_cycles(struct cyc_hist *ch,
				    u64 start,
				    unsigned offset, unsigned cycles,
				    unsigned have_start)
165 {
	/*
	 * For now we can only account one basic block per
	 * final jump. But multiple could be overlapping.
	 * Always account the longest one. So when
	 * a shorter one has already been seen, throw it away.
	 *
	 * We separately always account the full cycles.
	 */
174 ch[offset].num_aggr++;
175 ch[offset].cycles_aggr += cycles;
176
177 if (cycles > ch[offset].cycles_max)
178 ch[offset].cycles_max = cycles;
179
180 if (ch[offset].cycles_min) {
181 if (cycles && cycles < ch[offset].cycles_min)
182 ch[offset].cycles_min = cycles;
183 } else
184 ch[offset].cycles_min = cycles;
185
186 if (!have_start && ch[offset].have_start)
187 return 0;
188 if (ch[offset].num) {
189 if (have_start && (!ch[offset].have_start ||
190 ch[offset].start > start)) {
191 ch[offset].have_start = 0;
192 ch[offset].cycles = 0;
193 ch[offset].num = 0;
194 if (ch[offset].reset < 0xffff)
195 ch[offset].reset++;
196 } else if (have_start &&
197 ch[offset].start < start)
198 return 0;
199 }
200
201 if (ch[offset].num < NUM_SPARKS)
202 ch[offset].cycles_spark[ch[offset].num] = cycles;
203
204 ch[offset].have_start = have_start;
205 ch[offset].start = start;
206 ch[offset].cycles += cycles;
207 ch[offset].num++;
208 return 0;
209 }
210
static int __symbol__inc_addr_samples(struct map_symbol *ms,
				      struct annotated_source *src, int evidx, u64 addr,
				      struct perf_sample *sample)
214 {
215 struct symbol *sym = ms->sym;
216 long hash_key;
217 u64 offset;
218 struct sym_hist *h;
219 struct sym_hist_entry *entry;
220
221 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
222
223 if ((addr < sym->start || addr >= sym->end) &&
224 (addr != sym->end || sym->start != sym->end)) {
225 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
226 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
227 return -ERANGE;
228 }
229
230 offset = addr - sym->start;
231 h = annotated_source__histogram(src, evidx);
232 if (h == NULL) {
233 pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
234 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
235 return -ENOMEM;
236 }
237
238 hash_key = offset << 16 | evidx;
239 if (!hashmap__find(src->samples, hash_key, &entry)) {
240 entry = zalloc(sizeof(*entry));
241 if (entry == NULL)
242 return -ENOMEM;
243
244 if (hashmap__add(src->samples, hash_key, entry) < 0)
245 return -ENOMEM;
246 }
247
248 h->nr_samples++;
249 h->period += sample->period;
250 entry->nr_samples++;
251 entry->period += sample->period;
252
253 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
254 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
255 sym->start, sym->name, addr, addr - sym->start, evidx,
256 entry->nr_samples, entry->period);
257 return 0;
258 }
259
struct annotated_branch *annotation__get_branch(struct annotation *notes)
261 {
262 if (notes == NULL)
263 return NULL;
264
265 if (notes->branch == NULL)
266 notes->branch = zalloc(sizeof(*notes->branch));
267
268 return notes->branch;
269 }
270
static struct annotated_branch *symbol__find_branch_hist(struct symbol *sym,
							 unsigned int br_cntr_nr)
273 {
274 struct annotation *notes = symbol__annotation(sym);
275 struct annotated_branch *branch;
276 const size_t size = symbol__size(sym);
277
278 branch = annotation__get_branch(notes);
279 if (branch == NULL)
280 return NULL;
281
282 if (branch->cycles_hist == NULL) {
283 branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
284 if (!branch->cycles_hist)
285 return NULL;
286 }
287
288 if (br_cntr_nr && branch->br_cntr == NULL) {
289 branch->br_cntr = calloc(br_cntr_nr * size, sizeof(u64));
290 if (!branch->br_cntr)
291 return NULL;
292 }
293
294 return branch;
295 }
296
struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
298 {
299 struct annotation *notes = symbol__annotation(sym);
300
301 if (notes->src == NULL) {
302 notes->src = annotated_source__new();
303 if (notes->src == NULL)
304 return NULL;
305 goto alloc_histograms;
306 }
307
308 if (notes->src->histograms == NULL) {
309 alloc_histograms:
310 annotated_source__alloc_histograms(notes->src, nr_hists);
311 }
312
313 return notes->src;
314 }
315
static int symbol__inc_addr_samples(struct map_symbol *ms,
				    struct evsel *evsel, u64 addr,
				    struct perf_sample *sample)
319 {
320 struct symbol *sym = ms->sym;
321 struct annotated_source *src;
322
323 if (sym == NULL)
324 return 0;
325 src = symbol__hists(sym, evsel->evlist->core.nr_entries);
326 return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
327 }
328
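/*
 * Accumulate the branch counter values packed in @br_cntr for this
 * instruction offset.  Each counter is 'width' bits wide; a counter that
 * reads back as all ones is marked with the saturated flag.
 */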
static int symbol__account_br_cntr(struct annotated_branch *branch,
				   struct evsel *evsel,
				   unsigned offset,
				   u64 br_cntr)
333 {
334 unsigned int br_cntr_nr = evsel__leader(evsel)->br_cntr_nr;
335 unsigned int base = evsel__leader(evsel)->br_cntr_idx;
336 unsigned int off = offset * evsel->evlist->nr_br_cntr;
337 u64 *branch_br_cntr = branch->br_cntr;
338 unsigned int i, mask, width;
339
340 if (!br_cntr || !branch_br_cntr)
341 return 0;
342
343 perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width);
344 mask = (1L << width) - 1;
345 for (i = 0; i < br_cntr_nr; i++) {
346 u64 cntr = (br_cntr >> i * width) & mask;
347
348 branch_br_cntr[off + i + base] += cntr;
349 if (cntr == mask)
350 branch_br_cntr[off + i + base] |= ANNOTATION__BR_CNTR_SATURATED_FLAG;
351 }
352
353 return 0;
354 }
355
static int symbol__account_cycles(u64 addr, u64 start, struct symbol *sym,
				  unsigned cycles, struct evsel *evsel,
				  u64 br_cntr)
359 {
360 struct annotated_branch *branch;
361 unsigned offset;
362 int ret;
363
364 if (sym == NULL)
365 return 0;
366 branch = symbol__find_branch_hist(sym, evsel->evlist->nr_br_cntr);
367 if (!branch)
368 return -ENOMEM;
369 if (addr < sym->start || addr >= sym->end)
370 return -ERANGE;
371
372 if (start) {
373 if (start < sym->start || start >= sym->end)
374 return -ERANGE;
375 if (start >= addr)
376 start = 0;
377 }
378 offset = addr - sym->start;
379 ret = __symbol__account_cycles(branch->cycles_hist,
380 start ? start - sym->start : 0,
381 offset, cycles,
382 !!start);
383
384 if (ret)
385 return ret;
386
387 return symbol__account_br_cntr(branch, evsel, offset, br_cntr);
388 }
389
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
				    struct addr_map_symbol *start,
				    unsigned cycles,
				    struct evsel *evsel,
				    u64 br_cntr)
395 {
396 u64 saddr = 0;
397 int err;
398
399 if (!cycles)
400 return 0;
401
402 /*
403 * Only set start when IPC can be computed. We can only
404 * compute it when the basic block is completely in a single
405 * function.
406 * Special case the case when the jump is elsewhere, but
407 * it starts on the function start.
408 */
409 if (start &&
410 (start->ms.sym == ams->ms.sym ||
411 (ams->ms.sym &&
412 start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
413 saddr = start->al_addr;
414 if (saddr == 0)
415 pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
416 ams->addr,
417 start ? start->addr : 0,
418 ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
419 saddr);
420 err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles, evsel, br_cntr);
421 if (err)
422 pr_debug2("account_cycles failed %d\n", err);
423 return err;
424 }
425
struct annotation_line *annotated_source__get_line(struct annotated_source *src,
						   s64 offset)
428 {
429 struct annotation_line *al;
430
431 list_for_each_entry(al, &src->source, node) {
432 if (al->offset == offset)
433 return al;
434 }
435 return NULL;
436 }
437
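/*
 * Count the disassembled instructions between the @start and @end byte
 * offsets (inclusive), skipping source/comment lines (offset == -1).
 */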
static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
439 {
440 struct annotation_line *al;
441 unsigned n_insn = 0;
442
443 al = annotated_source__get_line(notes->src, start);
444 if (al == NULL)
445 return 0;
446
list_for_each_entry_from(al, &notes->src->source, node) {
448 if (al->offset == -1)
449 continue;
450 if ((u64)al->offset > end)
451 break;
452 n_insn++;
453 }
454 return n_insn;
455 }
456
static void annotated_branch__delete(struct annotated_branch *branch)
458 {
459 if (branch) {
460 zfree(&branch->cycles_hist);
461 free(branch->br_cntr);
462 free(branch);
463 }
464 }
465
static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
467 {
468 unsigned n_insn;
469 unsigned int cover_insn = 0;
470
471 n_insn = annotation__count_insn(notes, start, end);
472 if (n_insn && ch->num && ch->cycles) {
473 struct annotation_line *al;
474 struct annotated_branch *branch;
475 float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
476
477 /* Hide data when there are too many overlaps. */
478 if (ch->reset >= 0x7fff)
479 return;
480
481 al = annotated_source__get_line(notes->src, start);
482 if (al == NULL)
483 return;
484
list_for_each_entry_from(al, &notes->src->source, node) {
486 if (al->offset == -1)
487 continue;
488 if ((u64)al->offset > end)
489 break;
490 if (al->cycles && al->cycles->ipc == 0.0) {
491 al->cycles->ipc = ipc;
492 cover_insn++;
493 }
494 }
495
496 branch = annotation__get_branch(notes);
497 if (cover_insn && branch) {
498 branch->hit_cycles += ch->cycles;
499 branch->hit_insn += n_insn * ch->num;
500 branch->cover_insn += cover_insn;
501 }
502 }
503 }
504
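/*
 * Walk the cycles histogram from the end of the function backwards and,
 * for each basic block that has a recorded start, spread its IPC over the
 * instructions it covers.  Cycle min/avg/max and the per-instruction
 * branch counters are copied into the annotation lines for display.
 */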
static int annotation__compute_ipc(struct annotation *notes, size_t size,
				   struct evsel *evsel)
507 {
508 unsigned int br_cntr_nr = evsel->evlist->nr_br_cntr;
509 int err = 0;
510 s64 offset;
511
512 if (!notes->branch || !notes->branch->cycles_hist)
513 return 0;
514
515 notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
516 notes->branch->hit_cycles = 0;
517 notes->branch->hit_insn = 0;
518 notes->branch->cover_insn = 0;
519
520 annotation__lock(notes);
521 for (offset = size - 1; offset >= 0; --offset) {
522 struct cyc_hist *ch;
523
ch = &notes->branch->cycles_hist[offset];
525 if (ch && ch->cycles) {
526 struct annotation_line *al;
527
528 al = annotated_source__get_line(notes->src, offset);
529 if (al && al->cycles == NULL) {
530 al->cycles = zalloc(sizeof(*al->cycles));
531 if (al->cycles == NULL) {
532 err = ENOMEM;
533 break;
534 }
535 }
536 if (ch->have_start)
537 annotation__count_and_fill(notes, ch->start, offset, ch);
538 if (al && ch->num_aggr) {
539 al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
540 al->cycles->max = ch->cycles_max;
541 al->cycles->min = ch->cycles_min;
542 }
543 if (al && notes->branch->br_cntr) {
544 if (!al->br_cntr) {
545 al->br_cntr = calloc(br_cntr_nr, sizeof(u64));
546 if (!al->br_cntr) {
547 err = ENOMEM;
548 break;
549 }
550 }
551 al->num_aggr = ch->num_aggr;
552 al->br_cntr_nr = br_cntr_nr;
553 al->evsel = evsel;
memcpy(al->br_cntr, &notes->branch->br_cntr[offset * br_cntr_nr],
555 br_cntr_nr * sizeof(u64));
556 }
557 }
558 }
559
560 if (err) {
561 while (++offset < (s64)size) {
struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
563
564 if (ch && ch->cycles) {
565 struct annotation_line *al;
566
567 al = annotated_source__get_line(notes->src, offset);
568 if (al) {
569 zfree(&al->cycles);
570 zfree(&al->br_cntr);
571 }
572 }
573 }
574 }
575
576 annotation__unlock(notes);
577 return 0;
578 }
579
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
				 struct evsel *evsel)
582 {
583 return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
584 }
585
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
				 struct evsel *evsel, u64 ip)
588 {
589 return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
590 }
591
592
void annotation__exit(struct annotation *notes)
594 {
595 annotated_source__delete(notes->src);
596 annotated_branch__delete(notes->branch);
597 }
598
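/*
 * Annotation locking does not embed a mutex in each struct annotation.
 * Instead a fixed pool of mutexes (one per present CPU) is shared by all
 * annotations and the annotation pointer is hashed to pick one, see
 * annotation__get_mutex().
 */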
599 static struct sharded_mutex *sharded_mutex;
600
static void annotation__init_sharded_mutex(void)
602 {
603 /* As many mutexes as there are CPUs. */
604 sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
605 }
606
static size_t annotation__hash(const struct annotation *notes)
608 {
609 return (size_t)notes;
610 }
611
static struct mutex *annotation__get_mutex(const struct annotation *notes)
613 {
614 static pthread_once_t once = PTHREAD_ONCE_INIT;
615
616 pthread_once(&once, annotation__init_sharded_mutex);
617 if (!sharded_mutex)
618 return NULL;
619
620 return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
621 }
622
void annotation__lock(struct annotation *notes)
	NO_THREAD_SAFETY_ANALYSIS
625 {
626 struct mutex *mutex = annotation__get_mutex(notes);
627
628 if (mutex)
629 mutex_lock(mutex);
630 }
631
void annotation__unlock(struct annotation *notes)
	NO_THREAD_SAFETY_ANALYSIS
634 {
635 struct mutex *mutex = annotation__get_mutex(notes);
636
637 if (mutex)
638 mutex_unlock(mutex);
639 }
640
bool annotation__trylock(struct annotation *notes)
642 {
643 struct mutex *mutex = annotation__get_mutex(notes);
644
645 if (!mutex)
646 return false;
647
648 return mutex_trylock(mutex);
649 }
650
void annotation_line__add(struct annotation_line *al, struct list_head *head)
652 {
653 list_add_tail(&al->node, head);
654 }
655
656 struct annotation_line *
annotation_line__next(struct annotation_line *pos, struct list_head *head)
658 {
659 list_for_each_entry_continue(pos, head, node)
660 if (pos->offset >= 0)
661 return pos;
662
663 return NULL;
664 }
665
static const char *annotate__address_color(struct block_range *br)
667 {
668 double cov = block_range__coverage(br);
669
670 if (cov >= 0) {
671 /* mark red for >75% coverage */
672 if (cov > 0.75)
673 return PERF_COLOR_RED;
674
675 /* mark dull for <1% coverage */
676 if (cov < 0.01)
677 return PERF_COLOR_NORMAL;
678 }
679
680 return PERF_COLOR_MAGENTA;
681 }
682
static const char *annotate__asm_color(struct block_range *br)
684 {
685 double cov = block_range__coverage(br);
686
687 if (cov >= 0) {
688 /* mark dull for <1% coverage */
689 if (cov < 0.01)
690 return PERF_COLOR_NORMAL;
691 }
692
693 return PERF_COLOR_BLUE;
694 }
695
static void annotate__branch_printf(struct block_range *br, u64 addr)
697 {
698 bool emit_comment = true;
699
700 if (!br)
701 return;
702
703 #if 1
704 if (br->is_target && br->start == addr) {
705 struct block_range *branch = br;
706 double p;
707
708 /*
709 * Find matching branch to our target.
710 */
711 while (!branch->is_branch)
712 branch = block_range__next(branch);
713
714 p = 100 *(double)br->entry / branch->coverage;
715
716 if (p > 0.1) {
717 if (emit_comment) {
718 emit_comment = false;
719 printf("\t#");
720 }
721
722 /*
723 * The percentage of coverage joined at this target in relation
724 * to the next branch.
725 */
726 printf(" +%.2f%%", p);
727 }
728 }
729 #endif
730 if (br->is_branch && br->end == addr) {
731 double p = 100*(double)br->taken / br->coverage;
732
733 if (p > 0.1) {
734 if (emit_comment) {
735 emit_comment = false;
736 printf("\t#");
737 }
738
739 /*
740 * The percentage of coverage leaving at this branch, and
741 * its prediction ratio.
742 */
743 printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
744 }
745 }
746 }
747
static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
749 {
750 s64 offset = dl->al.offset;
751 const u64 addr = start + offset;
752 struct block_range *br;
753
754 br = block_range__find(addr);
755 color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr);
756 color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
757 annotate__branch_printf(br, addr);
758 return 0;
759 }
760
761 static int
annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
		       struct evsel *evsel, u64 len, int min_pcnt, int printed,
		       int max_lines, struct annotation_line *queue, int addr_fmt_width,
		       int percent_type)
766 {
767 struct disasm_line *dl = container_of(al, struct disasm_line, al);
768 struct annotation *notes = symbol__annotation(sym);
769 static const char *prev_line;
770
771 if (al->offset != -1) {
772 double max_percent = 0.0;
773 int i, nr_percent = 1;
774 const char *color;
775
776 for (i = 0; i < al->data_nr; i++) {
777 double percent;
778
779 percent = annotation_data__percent(&al->data[i],
780 percent_type);
781
782 if (percent > max_percent)
783 max_percent = percent;
784 }
785
786 if (al->data_nr > nr_percent)
787 nr_percent = al->data_nr;
788
789 if (max_percent < min_pcnt)
790 return -1;
791
792 if (max_lines && printed >= max_lines)
793 return 1;
794
795 if (queue != NULL) {
list_for_each_entry_from(queue, &notes->src->source, node) {
797 if (queue == al)
798 break;
799 annotation_line__print(queue, sym, start, evsel, len,
800 0, 0, 1, NULL, addr_fmt_width,
801 percent_type);
802 }
803 }
804
805 color = get_percent_color(max_percent);
806
807 for (i = 0; i < nr_percent; i++) {
808 struct annotation_data *data = &al->data[i];
809 double percent;
810
811 percent = annotation_data__percent(data, percent_type);
812 color = get_percent_color(percent);
813
814 if (symbol_conf.show_total_period)
815 color_fprintf(stdout, color, " %11" PRIu64,
816 data->he.period);
817 else if (symbol_conf.show_nr_samples)
818 color_fprintf(stdout, color, " %7" PRIu64,
819 data->he.nr_samples);
820 else
821 color_fprintf(stdout, color, " %7.2f", percent);
822 }
823
824 printf(" : ");
825
826 disasm_line__print(dl, start, addr_fmt_width);
827
		/*
		 * Also color the filename and line if needed, with
		 * the same color as the percentage. Don't print it
		 * twice for close colored addrs with the same filename:line.
		 */
833 if (al->path) {
834 if (!prev_line || strcmp(prev_line, al->path)) {
835 color_fprintf(stdout, color, " // %s", al->path);
836 prev_line = al->path;
837 }
838 }
839
840 printf("\n");
841 } else if (max_lines && printed >= max_lines)
842 return 1;
843 else {
844 int width = annotation__pcnt_width(notes);
845
846 if (queue)
847 return -1;
848
849 if (!*al->line)
850 printf(" %*s:\n", width, " ");
851 else
852 printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
853 }
854
855 return 0;
856 }
857
static void calc_percent(struct annotation *notes,
			 struct evsel *evsel,
			 struct annotation_data *data,
			 s64 offset, s64 end)
862 {
863 struct hists *hists = evsel__hists(evsel);
864 int evidx = evsel->core.idx;
865 struct sym_hist *sym_hist = annotation__histogram(notes, evidx);
866 unsigned int hits = 0;
867 u64 period = 0;
868
869 while (offset < end) {
870 struct sym_hist_entry *entry;
871
872 entry = annotated_source__hist_entry(notes->src, evidx, offset);
873 if (entry) {
874 hits += entry->nr_samples;
875 period += entry->period;
876 }
877 ++offset;
878 }
879
880 if (sym_hist->nr_samples) {
881 data->he.period = period;
882 data->he.nr_samples = hits;
883 data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
884 }
885
886 if (hists->stats.nr_non_filtered_samples)
887 data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
888
889 if (sym_hist->period)
890 data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
891
892 if (hists->stats.total_period)
893 data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
894 }
895
static void annotation__calc_percent(struct annotation *notes,
				     struct evsel *leader, s64 len)
898 {
899 struct annotation_line *al, *next;
900 struct evsel *evsel;
901
list_for_each_entry(al, &notes->src->source, node) {
903 s64 end;
904 int i = 0;
905
906 if (al->offset == -1)
907 continue;
908
next = annotation_line__next(al, &notes->src->source);
910 end = next ? next->offset : len;
911
912 for_each_group_evsel(evsel, leader) {
913 struct annotation_data *data;
914
915 BUG_ON(i >= al->data_nr);
916
917 if (symbol_conf.skip_empty &&
918 evsel__hists(evsel)->stats.nr_samples == 0)
919 continue;
920
921 data = &al->data[i++];
922
923 calc_percent(notes, evsel, data, al->offset, end);
924 }
925 }
926 }
927
void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
929 {
930 struct annotation *notes = symbol__annotation(sym);
931
932 annotation__calc_percent(notes, evsel, symbol__size(sym));
933 }
934
static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
936 {
937 struct perf_env *env = evsel__env(evsel);
938 const char *arch_name = perf_env__arch(env);
939 struct arch *arch;
940 int err;
941
942 if (!arch_name) {
943 *parch = NULL;
944 return errno;
945 }
946
947 *parch = arch = arch__find(arch_name);
948 if (arch == NULL) {
949 pr_err("%s: unsupported arch %s\n", __func__, arch_name);
950 return ENOTSUP;
951 }
952
953 if (arch->init) {
954 err = arch->init(arch, env ? env->cpuid : NULL);
955 if (err) {
956 pr_err("%s: failed to initialize %s arch priv area\n",
957 __func__, arch->name);
958 return err;
959 }
960 }
961 return 0;
962 }
963
int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
		     struct arch **parch)
966 {
967 struct symbol *sym = ms->sym;
968 struct annotation *notes = symbol__annotation(sym);
969 struct annotate_args args = {
970 .evsel = evsel,
971 .options = &annotate_opts,
972 };
973 struct arch *arch = NULL;
974 int err, nr;
975
976 err = evsel__get_arch(evsel, &arch);
977 if (err < 0)
978 return err;
979
980 if (parch)
981 *parch = arch;
982
983 if (notes->src && !list_empty(¬es->src->source))
984 return 0;
985
986 args.arch = arch;
987 args.ms = *ms;
988
989 if (notes->src == NULL) {
990 notes->src = annotated_source__new();
991 if (notes->src == NULL)
992 return -1;
993 }
994
995 nr = 0;
996 if (evsel__is_group_event(evsel)) {
997 struct evsel *pos;
998
999 for_each_group_evsel(pos, evsel) {
1000 if (symbol_conf.skip_empty &&
1001 evsel__hists(pos)->stats.nr_samples == 0)
1002 continue;
1003 nr++;
1004 }
1005 }
1006 notes->src->nr_events = nr ? nr : 1;
1007
1008 if (annotate_opts.full_addr)
1009 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1010 else
1011 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1012
1013 return symbol__disassemble(sym, &args);
1014 }
1015
static void insert_source_line(struct rb_root *root, struct annotation_line *al)
1017 {
1018 struct annotation_line *iter;
1019 struct rb_node **p = &root->rb_node;
1020 struct rb_node *parent = NULL;
1021 unsigned int percent_type = annotate_opts.percent_type;
1022 int i, ret;
1023
1024 while (*p != NULL) {
1025 parent = *p;
1026 iter = rb_entry(parent, struct annotation_line, rb_node);
1027
1028 ret = strcmp(iter->path, al->path);
1029 if (ret == 0) {
1030 for (i = 0; i < al->data_nr; i++) {
1031 iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
1032 percent_type);
1033 }
1034 return;
1035 }
1036
1037 if (ret < 0)
1038 p = &(*p)->rb_left;
1039 else
1040 p = &(*p)->rb_right;
1041 }
1042
1043 for (i = 0; i < al->data_nr; i++) {
1044 al->data[i].percent_sum = annotation_data__percent(&al->data[i],
1045 percent_type);
1046 }
1047
1048 rb_link_node(&al->rb_node, parent, p);
1049 rb_insert_color(&al->rb_node, root);
1050 }
1051
static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
1053 {
1054 int i;
1055
1056 for (i = 0; i < a->data_nr; i++) {
1057 if (a->data[i].percent_sum == b->data[i].percent_sum)
1058 continue;
1059 return a->data[i].percent_sum > b->data[i].percent_sum;
1060 }
1061
1062 return 0;
1063 }
1064
static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
1066 {
1067 struct annotation_line *iter;
1068 struct rb_node **p = &root->rb_node;
1069 struct rb_node *parent = NULL;
1070
1071 while (*p != NULL) {
1072 parent = *p;
1073 iter = rb_entry(parent, struct annotation_line, rb_node);
1074
1075 if (cmp_source_line(al, iter))
1076 p = &(*p)->rb_left;
1077 else
1078 p = &(*p)->rb_right;
1079 }
1080
1081 rb_link_node(&al->rb_node, parent, p);
1082 rb_insert_color(&al->rb_node, root);
1083 }
1084
static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1086 {
1087 struct annotation_line *al;
1088 struct rb_node *node;
1089
1090 node = rb_first(src_root);
1091 while (node) {
1092 struct rb_node *next;
1093
1094 al = rb_entry(node, struct annotation_line, rb_node);
1095 next = rb_next(node);
1096 rb_erase(node, src_root);
1097
1098 __resort_source_line(dest_root, al);
1099 node = next;
1100 }
1101 }
1102
static void print_summary(struct rb_root *root, const char *filename)
1104 {
1105 struct annotation_line *al;
1106 struct rb_node *node;
1107
1108 printf("\nSorted summary for file %s\n", filename);
1109 printf("----------------------------------------------\n\n");
1110
1111 if (RB_EMPTY_ROOT(root)) {
1112 printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1113 return;
1114 }
1115
1116 node = rb_first(root);
1117 while (node) {
1118 double percent, percent_max = 0.0;
1119 const char *color;
1120 char *path;
1121 int i;
1122
1123 al = rb_entry(node, struct annotation_line, rb_node);
1124 for (i = 0; i < al->data_nr; i++) {
1125 percent = al->data[i].percent_sum;
1126 color = get_percent_color(percent);
1127 color_fprintf(stdout, color, " %7.2f", percent);
1128
1129 if (percent > percent_max)
1130 percent_max = percent;
1131 }
1132
1133 path = al->path;
1134 color = get_percent_color(percent_max);
1135 color_fprintf(stdout, color, " %s\n", path);
1136
1137 node = rb_next(node);
1138 }
1139 }
1140
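/* With -v, dump the non-zero per-address sample counts for the symbol. */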
static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
1142 {
1143 int evidx = evsel->core.idx;
1144 struct annotation *notes = symbol__annotation(sym);
1145 struct sym_hist *h = annotation__histogram(notes, evidx);
1146 u64 len = symbol__size(sym), offset;
1147
1148 for (offset = 0; offset < len; ++offset) {
1149 struct sym_hist_entry *entry;
1150
1151 entry = annotated_source__hist_entry(notes->src, evidx, offset);
1152 if (entry && entry->nr_samples != 0)
1153 printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
1154 sym->start + offset, entry->nr_samples);
1155 }
1156 printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
1157 }
1158
static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
1160 {
1161 char bf[32];
1162 struct annotation_line *line;
1163
1164 list_for_each_entry_reverse(line, lines, node) {
1165 if (line->offset != -1)
1166 return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
1167 }
1168
1169 return 0;
1170 }
1171
int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
1173 {
1174 struct map *map = ms->map;
1175 struct symbol *sym = ms->sym;
1176 struct dso *dso = map__dso(map);
1177 char *filename;
1178 const char *d_filename;
1179 const char *evsel_name = evsel__name(evsel);
1180 struct annotation *notes = symbol__annotation(sym);
1181 struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
1182 struct annotation_line *pos, *queue = NULL;
1183 struct annotation_options *opts = &annotate_opts;
1184 u64 start = map__rip_2objdump(map, sym->start);
1185 int printed = 2, queue_len = 0, addr_fmt_width;
1186 int more = 0;
1187 bool context = opts->context;
1188 u64 len;
1189 int width = annotation__pcnt_width(notes);
1190 int graph_dotted_len;
1191 char buf[512];
1192
1193 filename = strdup(dso__long_name(dso));
1194 if (!filename)
1195 return -ENOMEM;
1196
1197 if (opts->full_path)
1198 d_filename = filename;
1199 else
1200 d_filename = basename(filename);
1201
1202 len = symbol__size(sym);
1203
1204 if (evsel__is_group_event(evsel)) {
1205 evsel__group_desc(evsel, buf, sizeof(buf));
1206 evsel_name = buf;
1207 }
1208
1209 graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
1210 "percent: %s)\n",
1211 width, width, symbol_conf.show_total_period ? "Period" :
1212 symbol_conf.show_nr_samples ? "Samples" : "Percent",
1213 d_filename, evsel_name, h->nr_samples,
1214 percent_type_str(opts->percent_type));
1215
1216 printf("%-*.*s----\n",
1217 graph_dotted_len, graph_dotted_len, graph_dotted_line);
1218
1219 if (verbose > 0)
1220 symbol__annotate_hits(sym, evsel);
1221
addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
1223
list_for_each_entry(pos, &notes->src->source, node) {
1225 int err;
1226
1227 if (context && queue == NULL) {
1228 queue = pos;
1229 queue_len = 0;
1230 }
1231
1232 err = annotation_line__print(pos, sym, start, evsel, len,
1233 opts->min_pcnt, printed, opts->max_lines,
1234 queue, addr_fmt_width, opts->percent_type);
1235
1236 switch (err) {
1237 case 0:
1238 ++printed;
1239 if (context) {
1240 printed += queue_len;
1241 queue = NULL;
1242 queue_len = 0;
1243 }
1244 break;
1245 case 1:
1246 /* filtered by max_lines */
1247 ++more;
1248 break;
1249 case -1:
1250 default:
1251 /*
1252 * Filtered by min_pcnt or non IP lines when
1253 * context != 0
1254 */
1255 if (!context)
1256 break;
1257 if (queue_len == context)
1258 queue = list_entry(queue->node.next, typeof(*queue), node);
1259 else
1260 ++queue_len;
1261 break;
1262 }
1263 }
1264
1265 free(filename);
1266
1267 return more;
1268 }
1269
static void FILE__set_percent_color(void *fp __maybe_unused,
				    double percent __maybe_unused,
				    bool current __maybe_unused)
1273 {
1274 }
1275
static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
					 int nr __maybe_unused, bool current __maybe_unused)
1278 {
1279 return 0;
1280 }
1281
static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
1283 {
1284 return 0;
1285 }
1286
static void FILE__printf(void *fp, const char *fmt, ...)
1288 {
1289 va_list args;
1290
1291 va_start(args, fmt);
1292 vfprintf(fp, fmt, args);
1293 va_end(args);
1294 }
1295
static void FILE__write_graph(void *fp, int graph)
1297 {
1298 const char *s;
1299 switch (graph) {
1300
1301 case DARROW_CHAR: s = "↓"; break;
1302 case UARROW_CHAR: s = "↑"; break;
1303 case LARROW_CHAR: s = "←"; break;
1304 case RARROW_CHAR: s = "→"; break;
1305 default: s = "?"; break;
1306 }
1307
1308 fputs(s, fp);
1309 }
1310
static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
1312 {
1313 struct annotation *notes = symbol__annotation(sym);
1314 struct annotation_write_ops wops = {
1315 .first_line = true,
1316 .obj = fp,
1317 .set_color = FILE__set_color,
1318 .set_percent_color = FILE__set_percent_color,
1319 .set_jumps_percent_color = FILE__set_jumps_percent_color,
1320 .printf = FILE__printf,
1321 .write_graph = FILE__write_graph,
1322 };
1323 struct annotation_line *al;
1324
list_for_each_entry(al, &notes->src->source, node) {
1326 if (annotation_line__filter(al))
1327 continue;
1328 annotation_line__write(al, notes, &wops);
1329 fputc('\n', fp);
1330 wops.first_line = false;
1331 }
1332
1333 return 0;
1334 }
1335
int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
1337 {
1338 const char *ev_name = evsel__name(evsel);
1339 char buf[1024];
1340 char *filename;
1341 int err = -1;
1342 FILE *fp;
1343
1344 if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
1345 return -1;
1346
1347 fp = fopen(filename, "w");
1348 if (fp == NULL)
1349 goto out_free_filename;
1350
1351 if (evsel__is_group_event(evsel)) {
1352 evsel__group_desc(evsel, buf, sizeof(buf));
1353 ev_name = buf;
1354 }
1355
1356 fprintf(fp, "%s() %s\nEvent: %s\n\n",
1357 ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
1358 symbol__annotate_fprintf2(ms->sym, fp);
1359
1360 fclose(fp);
1361 err = 0;
1362 out_free_filename:
1363 free(filename);
1364 return err;
1365 }
1366
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
1368 {
1369 struct annotation *notes = symbol__annotation(sym);
1370 struct sym_hist *h = annotation__histogram(notes, evidx);
1371
1372 memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
1373 }
1374
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
1376 {
1377 struct annotation *notes = symbol__annotation(sym);
1378 struct sym_hist *h = annotation__histogram(notes, evidx);
1379 struct annotation_line *al;
1380
1381 h->nr_samples = 0;
list_for_each_entry(al, &notes->src->source, node) {
1383 struct sym_hist_entry *entry;
1384
1385 if (al->offset == -1)
1386 continue;
1387
1388 entry = annotated_source__hist_entry(notes->src, evidx, al->offset);
1389 if (entry == NULL)
1390 continue;
1391
1392 entry->nr_samples = entry->nr_samples * 7 / 8;
1393 h->nr_samples += entry->nr_samples;
1394 }
1395 }
1396
void annotated_source__purge(struct annotated_source *as)
1398 {
1399 struct annotation_line *al, *n;
1400
1401 list_for_each_entry_safe(al, n, &as->source, node) {
1402 list_del_init(&al->node);
1403 disasm_line__free(disasm_line(al));
1404 }
1405 }
1406
static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1408 {
1409 size_t printed;
1410
1411 if (dl->al.offset == -1)
1412 return fprintf(fp, "%s\n", dl->al.line);
1413
1414 printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
1415
1416 if (dl->ops.raw[0] != '\0') {
1417 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1418 dl->ops.raw);
1419 }
1420
1421 return printed + fprintf(fp, "\n");
1422 }
1423
size_t disasm__fprintf(struct list_head *head, FILE *fp)
1425 {
1426 struct disasm_line *pos;
1427 size_t printed = 0;
1428
1429 list_for_each_entry(pos, head, al.node)
1430 printed += disasm_line__fprintf(pos, fp);
1431
1432 return printed;
1433 }
1434
bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
1436 {
1437 if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
1438 !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
1439 dl->ops.target.offset >= (s64)symbol__size(sym))
1440 return false;
1441
1442 return true;
1443 }
1444
1445 static void
annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
1447 {
1448 struct annotation_line *al;
1449
1450 /* PLT symbols contain external offsets */
1451 if (strstr(sym->name, "@plt"))
1452 return;
1453
list_for_each_entry(al, &notes->src->source, node) {
1455 struct disasm_line *dl;
1456 struct annotation_line *target;
1457
1458 dl = disasm_line(al);
1459
1460 if (!disasm_line__is_valid_local_jump(dl, sym))
1461 continue;
1462
1463 target = annotated_source__get_line(notes->src,
1464 dl->ops.target.offset);
1465 /*
1466 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
1467 * have to adjust to the previous offset?
1468 */
1469 if (target == NULL)
1470 continue;
1471
1472 if (++target->jump_sources > notes->src->max_jump_sources)
1473 notes->src->max_jump_sources = target->jump_sources;
1474 }
1475 }
1476
static void annotation__set_index(struct annotation *notes)
1478 {
1479 struct annotation_line *al;
1480 struct annotated_source *src = notes->src;
1481
1482 src->widths.max_line_len = 0;
1483 src->nr_entries = 0;
1484 src->nr_asm_entries = 0;
1485
1486 list_for_each_entry(al, &src->source, node) {
1487 size_t line_len = strlen(al->line);
1488
1489 if (src->widths.max_line_len < line_len)
1490 src->widths.max_line_len = line_len;
1491 al->idx = src->nr_entries++;
1492 if (al->offset != -1)
1493 al->idx_asm = src->nr_asm_entries++;
1494 else
1495 al->idx_asm = -1;
1496 }
1497 }
1498
static inline int width_jumps(int n)
1500 {
1501 if (n >= 100)
1502 return 5;
1503 if (n / 10)
1504 return 2;
1505 return 1;
1506 }
1507
static int annotation__max_ins_name(struct annotation *notes)
1509 {
1510 int max_name = 0, len;
1511 struct annotation_line *al;
1512
list_for_each_entry(al, &notes->src->source, node) {
1514 if (al->offset == -1)
1515 continue;
1516
1517 len = strlen(disasm_line(al)->ins.name);
1518 if (max_name < len)
1519 max_name = len;
1520 }
1521
1522 return max_name;
1523 }
1524
1525 static void
annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
1527 {
1528 notes->src->widths.addr = notes->src->widths.target =
1529 notes->src->widths.min_addr = hex_width(symbol__size(sym));
1530 notes->src->widths.max_addr = hex_width(sym->end);
1531 notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources);
1532 notes->src->widths.max_ins_name = annotation__max_ins_name(notes);
1533 }
1534
void annotation__update_column_widths(struct annotation *notes)
1536 {
1537 if (annotate_opts.use_offset)
1538 notes->src->widths.target = notes->src->widths.min_addr;
1539 else if (annotate_opts.full_addr)
1540 notes->src->widths.target = BITS_PER_LONG / 4;
1541 else
1542 notes->src->widths.target = notes->src->widths.max_addr;
1543
1544 notes->src->widths.addr = notes->src->widths.target;
1545
1546 if (annotate_opts.show_nr_jumps)
1547 notes->src->widths.addr += notes->src->widths.jumps + 1;
1548 }
1549
void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
1551 {
1552 annotate_opts.full_addr = !annotate_opts.full_addr;
1553
1554 if (annotate_opts.full_addr)
1555 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1556 else
1557 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1558
1559 annotation__update_column_widths(notes);
1560 }
1561
static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
				   struct rb_root *root)
1564 {
1565 struct annotation_line *al;
1566 struct rb_root tmp_root = RB_ROOT;
1567
list_for_each_entry(al, &notes->src->source, node) {
1569 double percent_max = 0.0;
1570 u64 addr;
1571 int i;
1572
1573 for (i = 0; i < al->data_nr; i++) {
1574 double percent;
1575
1576 percent = annotation_data__percent(&al->data[i],
1577 annotate_opts.percent_type);
1578
1579 if (percent > percent_max)
1580 percent_max = percent;
1581 }
1582
1583 if (percent_max <= 0.5)
1584 continue;
1585
1586 addr = map__rip_2objdump(ms->map, ms->sym->start);
1587 al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
1588 false, true, ms->sym->start + al->offset);
1589 insert_source_line(&tmp_root, al);
1590 }
1591
1592 resort_source_line(root, &tmp_root);
1593 }
1594
static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
1596 {
1597 struct annotation *notes = symbol__annotation(ms->sym);
1598
1599 annotation__calc_lines(notes, ms, root);
1600 }
1601
int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
1603 {
1604 struct dso *dso = map__dso(ms->map);
1605 struct symbol *sym = ms->sym;
1606 struct rb_root source_line = RB_ROOT;
1607 struct hists *hists = evsel__hists(evsel);
1608 char buf[1024];
1609 int err;
1610
1611 err = symbol__annotate2(ms, evsel, NULL);
1612 if (err) {
1613 char msg[BUFSIZ];
1614
1615 dso__set_annotate_warned(dso);
1616 symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1617 ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1618 return -1;
1619 }
1620
1621 if (annotate_opts.print_lines) {
1622 srcline_full_filename = annotate_opts.full_path;
1623 symbol__calc_lines(ms, &source_line);
1624 print_summary(&source_line, dso__long_name(dso));
1625 }
1626
1627 hists__scnprintf_title(hists, buf, sizeof(buf));
1628 fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
1629 buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso));
1630 symbol__annotate_fprintf2(sym, stdout);
1631
1632 annotated_source__purge(symbol__annotation(sym)->src);
1633
1634 return 0;
1635 }
1636
int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
1638 {
1639 struct dso *dso = map__dso(ms->map);
1640 struct symbol *sym = ms->sym;
1641 struct rb_root source_line = RB_ROOT;
1642 int err;
1643
1644 err = symbol__annotate(ms, evsel, NULL);
1645 if (err) {
1646 char msg[BUFSIZ];
1647
1648 dso__set_annotate_warned(dso);
1649 symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1650 ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1651 return -1;
1652 }
1653
1654 symbol__calc_percent(sym, evsel);
1655
1656 if (annotate_opts.print_lines) {
1657 srcline_full_filename = annotate_opts.full_path;
1658 symbol__calc_lines(ms, &source_line);
1659 print_summary(&source_line, dso__long_name(dso));
1660 }
1661
1662 symbol__annotate_printf(ms, evsel);
1663
1664 annotated_source__purge(symbol__annotation(sym)->src);
1665
1666 return 0;
1667 }
1668
bool ui__has_annotation(void)
1670 {
1671 return use_browser == 1 && perf_hpp_list.sym;
1672 }
1673
1674
static double annotation_line__max_percent(struct annotation_line *al,
					   unsigned int percent_type)
1677 {
1678 double percent_max = 0.0;
1679 int i;
1680
1681 for (i = 0; i < al->data_nr; i++) {
1682 double percent;
1683
1684 percent = annotation_data__percent(&al->data[i],
1685 percent_type);
1686
1687 if (percent > percent_max)
1688 percent_max = percent;
1689 }
1690
1691 return percent_max;
1692 }
1693
static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
			       void *obj, char *bf, size_t size,
			       void (*obj__printf)(void *obj, const char *fmt, ...),
			       void (*obj__write_graph)(void *obj, int graph))
1698 {
1699 if (dl->ins.ops && dl->ins.ops->scnprintf) {
1700 if (ins__is_jump(&dl->ins)) {
1701 bool fwd;
1702
1703 if (dl->ops.target.outside)
1704 goto call_like;
1705 fwd = dl->ops.target.offset > dl->al.offset;
1706 obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
1707 obj__printf(obj, " ");
1708 } else if (ins__is_call(&dl->ins)) {
1709 call_like:
1710 obj__write_graph(obj, RARROW_CHAR);
1711 obj__printf(obj, " ");
1712 } else if (ins__is_ret(&dl->ins)) {
1713 obj__write_graph(obj, LARROW_CHAR);
1714 obj__printf(obj, " ");
1715 } else {
1716 obj__printf(obj, " ");
1717 }
1718 } else {
1719 obj__printf(obj, " ");
1720 }
1721
1722 disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
1723 notes->src->widths.max_ins_name);
1724 }
1725
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
1727 {
1728 double ipc = 0.0, coverage = 0.0;
1729 struct annotated_branch *branch = annotation__get_branch(notes);
1730
1731 if (branch && branch->hit_cycles)
1732 ipc = branch->hit_insn / ((double)branch->hit_cycles);
1733
1734 if (branch && branch->total_insn) {
1735 coverage = branch->cover_insn * 100.0 /
1736 ((double)branch->total_insn);
1737 }
1738
1739 scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
1740 ipc, coverage);
1741 }
1742
int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header)
1744 {
1745 struct evsel *pos;
1746 struct strbuf sb;
1747
1748 if (evsel->evlist->nr_br_cntr <= 0)
1749 return -ENOTSUP;
1750
1751 strbuf_init(&sb, /*hint=*/ 0);
1752
1753 if (header && strbuf_addf(&sb, "# Branch counter abbr list:\n"))
1754 goto err;
1755
1756 evlist__for_each_entry(evsel->evlist, pos) {
1757 if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
1758 continue;
1759 if (header && strbuf_addf(&sb, "#"))
1760 goto err;
1761
1762 if (strbuf_addf(&sb, " %s = %s\n", pos->name, pos->abbr_name))
1763 goto err;
1764 }
1765
1766 if (header && strbuf_addf(&sb, "#"))
1767 goto err;
1768 if (strbuf_addf(&sb, " '-' No event occurs\n"))
1769 goto err;
1770
1771 if (header && strbuf_addf(&sb, "#"))
1772 goto err;
1773 if (strbuf_addf(&sb, " '+' Event occurrences may be lost due to branch counter saturated\n"))
1774 goto err;
1775
1776 *str = strbuf_detach(&sb, NULL);
1777
1778 return 0;
1779 err:
1780 strbuf_release(&sb);
1781 return -ENOMEM;
1782 }
1783
1784 /* Assume the branch counter saturated at 3 */
1785 #define ANNOTATION_BR_CNTR_SATURATION 3
1786
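/*
 * Format the "Branch Counter" cell for one instruction: by default a small
 * histogram built from the event abbreviation characters, or the exact
 * (averaged) counter values when -v is used.
 */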
int annotation_br_cntr_entry(char **str, int br_cntr_nr,
			     u64 *br_cntr, int num_aggr,
			     struct evsel *evsel)
1790 {
1791 struct evsel *pos = evsel ? evlist__first(evsel->evlist) : NULL;
1792 bool saturated = false;
1793 int i, j, avg, used;
1794 struct strbuf sb;
1795
1796 strbuf_init(&sb, /*hint=*/ 0);
1797 for (i = 0; i < br_cntr_nr; i++) {
1798 used = 0;
1799 avg = ceil((double)(br_cntr[i] & ~ANNOTATION__BR_CNTR_SATURATED_FLAG) /
1800 (double)num_aggr);
1801
		/*
		 * A histogram with the abbr name is displayed by default.
		 * With -v, the exact branch counter value is displayed.
		 */
1806 if (verbose) {
1807 evlist__for_each_entry_from(evsel->evlist, pos) {
1808 if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
1809 (pos->br_cntr_idx == i))
1810 break;
1811 }
1812 if (strbuf_addstr(&sb, pos->abbr_name))
1813 goto err;
1814
1815 if (!br_cntr[i]) {
1816 if (strbuf_addstr(&sb, "=-"))
1817 goto err;
1818 } else {
1819 if (strbuf_addf(&sb, "=%d", avg))
1820 goto err;
1821 }
1822 if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG) {
1823 if (strbuf_addch(&sb, '+'))
1824 goto err;
1825 } else {
1826 if (strbuf_addch(&sb, ' '))
1827 goto err;
1828 }
1829
1830 if ((i < br_cntr_nr - 1) && strbuf_addch(&sb, ','))
1831 goto err;
1832 continue;
1833 }
1834
1835 if (strbuf_addch(&sb, '|'))
1836 goto err;
1837
1838 if (!br_cntr[i]) {
1839 if (strbuf_addch(&sb, '-'))
1840 goto err;
1841 used++;
1842 } else {
1843 evlist__for_each_entry_from(evsel->evlist, pos) {
1844 if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
1845 (pos->br_cntr_idx == i))
1846 break;
1847 }
1848 if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG)
1849 saturated = true;
1850
1851 for (j = 0; j < avg; j++, used++) {
1852 /* Print + if the number of logged events > 3 */
1853 if (j >= ANNOTATION_BR_CNTR_SATURATION) {
1854 saturated = true;
1855 break;
1856 }
1857 if (strbuf_addstr(&sb, pos->abbr_name))
1858 goto err;
1859 }
1860
1861 if (saturated) {
1862 if (strbuf_addch(&sb, '+'))
1863 goto err;
1864 used++;
1865 }
1866 pos = list_next_entry(pos, core.node);
1867 }
1868
1869 for (j = used; j < ANNOTATION_BR_CNTR_SATURATION + 1; j++) {
1870 if (strbuf_addch(&sb, ' '))
1871 goto err;
1872 }
1873 }
1874
1875 if (!verbose && strbuf_addch(&sb, br_cntr_nr ? '|' : ' '))
1876 goto err;
1877
1878 *str = strbuf_detach(&sb, NULL);
1879
1880 return 0;
1881 err:
1882 strbuf_release(&sb);
1883 return -ENOMEM;
1884 }
1885
static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
				     bool first_line, bool current_entry, bool change_color, int width,
				     void *obj, unsigned int percent_type,
				     int (*obj__set_color)(void *obj, int color),
				     void (*obj__set_percent_color)(void *obj, double percent, bool current),
				     int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
				     void (*obj__printf)(void *obj, const char *fmt, ...),
				     void (*obj__write_graph)(void *obj, int graph))
1894
1895 {
1896 double percent_max = annotation_line__max_percent(al, percent_type);
1897 int pcnt_width = annotation__pcnt_width(notes),
1898 cycles_width = annotation__cycles_width(notes);
1899 bool show_title = false;
1900 char bf[256];
1901 int printed;
1902
1903 if (first_line && (al->offset == -1 || percent_max == 0.0)) {
1904 if (notes->branch && al->cycles) {
1905 if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
1906 show_title = true;
1907 } else
1908 show_title = true;
1909 }
1910
1911 if (al->offset != -1 && percent_max != 0.0) {
1912 int i;
1913
1914 for (i = 0; i < al->data_nr; i++) {
1915 double percent;
1916
1917 percent = annotation_data__percent(&al->data[i], percent_type);
1918
1919 obj__set_percent_color(obj, percent, current_entry);
1920 if (symbol_conf.show_total_period) {
1921 obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
1922 } else if (symbol_conf.show_nr_samples) {
1923 obj__printf(obj, "%7" PRIu64 " ",
1924 al->data[i].he.nr_samples);
1925 } else {
1926 obj__printf(obj, "%7.2f ", percent);
1927 }
1928 }
1929 } else {
1930 obj__set_percent_color(obj, 0, current_entry);
1931
1932 if (!show_title)
1933 obj__printf(obj, "%-*s", pcnt_width, " ");
1934 else {
1935 obj__printf(obj, "%-*s", pcnt_width,
1936 symbol_conf.show_total_period ? "Period" :
1937 symbol_conf.show_nr_samples ? "Samples" : "Percent");
1938 }
1939 }
1940
1941 if (notes->branch) {
1942 if (al->cycles && al->cycles->ipc)
1943 obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
1944 else if (!show_title)
1945 obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
1946 else
1947 obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
1948
1949 if (!annotate_opts.show_minmax_cycle) {
1950 if (al->cycles && al->cycles->avg)
1951 obj__printf(obj, "%*" PRIu64 " ",
1952 ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
1953 else if (!show_title)
1954 obj__printf(obj, "%*s",
1955 ANNOTATION__CYCLES_WIDTH, " ");
1956 else
1957 obj__printf(obj, "%*s ",
1958 ANNOTATION__CYCLES_WIDTH - 1,
1959 "Cycle");
1960 } else {
1961 if (al->cycles) {
1962 char str[32];
1963
1964 scnprintf(str, sizeof(str),
1965 "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
1966 al->cycles->avg, al->cycles->min,
1967 al->cycles->max);
1968
1969 obj__printf(obj, "%*s ",
1970 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
1971 str);
1972 } else if (!show_title)
1973 obj__printf(obj, "%*s",
1974 ANNOTATION__MINMAX_CYCLES_WIDTH,
1975 " ");
1976 else
1977 obj__printf(obj, "%*s ",
1978 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
1979 "Cycle(min/max)");
1980 }
1981
1982 if (annotate_opts.show_br_cntr) {
1983 if (show_title) {
1984 obj__printf(obj, "%*s ",
1985 ANNOTATION__BR_CNTR_WIDTH,
1986 "Branch Counter");
1987 } else {
1988 char *buf;
1989
1990 if (!annotation_br_cntr_entry(&buf, al->br_cntr_nr, al->br_cntr,
1991 al->num_aggr, al->evsel)) {
1992 obj__printf(obj, "%*s ", ANNOTATION__BR_CNTR_WIDTH, buf);
1993 free(buf);
1994 }
1995 }
1996 }
1997
1998 if (show_title && !*al->line) {
1999 ipc_coverage_string(bf, sizeof(bf), notes);
2000 obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
2001 }
2002 }
2003
2004 obj__printf(obj, " ");
2005
2006 if (!*al->line)
2007 obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
2008 else if (al->offset == -1) {
2009 if (al->line_nr && annotate_opts.show_linenr)
2010 printed = scnprintf(bf, sizeof(bf), "%-*d ",
2011 notes->src->widths.addr + 1, al->line_nr);
2012 else
2013 printed = scnprintf(bf, sizeof(bf), "%-*s ",
2014 notes->src->widths.addr, " ");
2015 obj__printf(obj, bf);
2016 obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
2017 } else {
2018 u64 addr = al->offset;
2019 int color = -1;
2020
2021 if (!annotate_opts.use_offset)
2022 addr += notes->src->start;
2023
2024 if (!annotate_opts.use_offset) {
2025 printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
2026 } else {
2027 if (al->jump_sources &&
2028 annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
2029 if (annotate_opts.show_nr_jumps) {
2030 int prev;
2031 printed = scnprintf(bf, sizeof(bf), "%*d ",
2032 notes->src->widths.jumps,
2033 al->jump_sources);
2034 prev = obj__set_jumps_percent_color(obj, al->jump_sources,
2035 current_entry);
2036 obj__printf(obj, bf);
2037 obj__set_color(obj, prev);
2038 }
2039 print_addr:
2040 printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
2041 notes->src->widths.target, addr);
2042 } else if (ins__is_call(&disasm_line(al)->ins) &&
2043 annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
2044 goto print_addr;
2045 } else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
2046 goto print_addr;
2047 } else {
2048 printed = scnprintf(bf, sizeof(bf), "%-*s ",
2049 notes->src->widths.addr, " ");
2050 }
2051 }
2052
2053 if (change_color)
2054 color = obj__set_color(obj, HE_COLORSET_ADDR);
2055 obj__printf(obj, bf);
2056 if (change_color)
2057 obj__set_color(obj, color);
2058
2059 disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);
2060
2061 obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
2062 }
2063
2064 }
2065
2066 void annotation_line__write(struct annotation_line *al, struct annotation *notes,
2067 struct annotation_write_ops *wops)
2068 {
2069 __annotation_line__write(al, notes, wops->first_line, wops->current_entry,
2070 wops->change_color, wops->width, wops->obj,
2071 annotate_opts.percent_type,
2072 wops->set_color, wops->set_percent_color,
2073 wops->set_jumps_percent_color, wops->printf,
2074 wops->write_graph);
2075 }
2076
2077 int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
2078 struct arch **parch)
2079 {
2080 struct symbol *sym = ms->sym;
2081 struct annotation *notes = symbol__annotation(sym);
2082 size_t size = symbol__size(sym);
2083 int err;
2084
2085 err = symbol__annotate(ms, evsel, parch);
2086 if (err)
2087 return err;
2088
2089 symbol__calc_percent(sym, evsel);
2090
2091 annotation__set_index(notes);
2092 annotation__mark_jump_targets(notes, sym);
2093
2094 err = annotation__compute_ipc(notes, size, evsel);
2095 if (err)
2096 return err;
2097
2098 annotation__init_column_widths(notes, sym);
2099 annotation__update_column_widths(notes);
2100 sym->annotate2 = 1;
2101
2102 return 0;
2103 }
2104
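/*
 * A minimal example of how the keys handled below might appear in a
 * user's ~/.perfconfig (values are illustrative only):
 *
 *	[annotate]
 *		hide_src_code = true
 *		show_nr_jumps = true
 *		offset_level = 1
 *		objdump = /usr/bin/objdump
 */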
2105 static int annotation__config(const char *var, const char *value, void *data)
2106 {
2107 struct annotation_options *opt = data;
2108
2109 if (!strstarts(var, "annotate."))
2110 return 0;
2111
2112 if (!strcmp(var, "annotate.offset_level")) {
2113 perf_config_u8(&opt->offset_level, "offset_level", value);
2114
2115 if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
2116 opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
2117 else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
2118 opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
2119 } else if (!strcmp(var, "annotate.hide_src_code")) {
2120 opt->hide_src_code = perf_config_bool("hide_src_code", value);
2121 } else if (!strcmp(var, "annotate.jump_arrows")) {
2122 opt->jump_arrows = perf_config_bool("jump_arrows", value);
2123 } else if (!strcmp(var, "annotate.show_linenr")) {
2124 opt->show_linenr = perf_config_bool("show_linenr", value);
2125 } else if (!strcmp(var, "annotate.show_nr_jumps")) {
2126 opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
2127 } else if (!strcmp(var, "annotate.show_nr_samples")) {
2128 symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
2129 value);
2130 } else if (!strcmp(var, "annotate.show_total_period")) {
2131 symbol_conf.show_total_period = perf_config_bool("show_total_period",
2132 value);
2133 } else if (!strcmp(var, "annotate.use_offset")) {
2134 opt->use_offset = perf_config_bool("use_offset", value);
2135 } else if (!strcmp(var, "annotate.disassembler_style")) {
2136 opt->disassembler_style = strdup(value);
2137 if (!opt->disassembler_style) {
2138 pr_err("Not enough memory for annotate.disassembler_style\n");
2139 return -1;
2140 }
2141 } else if (!strcmp(var, "annotate.objdump")) {
2142 opt->objdump_path = strdup(value);
2143 if (!opt->objdump_path) {
2144 pr_err("Not enough memory for annotate.objdump\n");
2145 return -1;
2146 }
2147 } else if (!strcmp(var, "annotate.addr2line")) {
2148 symbol_conf.addr2line_path = strdup(value);
2149 if (!symbol_conf.addr2line_path) {
2150 pr_err("Not enough memory for annotate.addr2line\n");
2151 return -1;
2152 }
2153 } else if (!strcmp(var, "annotate.demangle")) {
2154 symbol_conf.demangle = perf_config_bool("demangle", value);
2155 } else if (!strcmp(var, "annotate.demangle_kernel")) {
2156 symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
2157 } else {
2158 pr_debug("%s variable unknown, ignoring...", var);
2159 }
2160
2161 return 0;
2162 }
2163
2164 void annotation_options__init(void)
2165 {
2166 struct annotation_options *opt = &annotate_opts;
2167
2168 memset(opt, 0, sizeof(*opt));
2169
2170 /* Default values. */
2171 opt->use_offset = true;
2172 opt->jump_arrows = true;
2173 opt->annotate_src = true;
2174 opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
2175 opt->percent_type = PERCENT_PERIOD_LOCAL;
2176 }
2177
2178 void annotation_options__exit(void)
2179 {
2180 zfree(&annotate_opts.disassembler_style);
2181 zfree(&annotate_opts.objdump_path);
2182 }
2183
2184 void annotation_config__init(void)
2185 {
2186 perf_config(annotation__config, &annotate_opts);
2187 }
2188
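/*
 * The percent-type string (e.g. the value of the --percent-type option) is
 * split on '-' and tried in both orders below, so "local-period",
 * "period-local", "global-hits" and "hits-global" are all accepted.
 */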
2189 static unsigned int parse_percent_type(char *str1, char *str2)
2190 {
2191 unsigned int type = (unsigned int) -1;
2192
2193 if (!strcmp("period", str1)) {
2194 if (!strcmp("local", str2))
2195 type = PERCENT_PERIOD_LOCAL;
2196 else if (!strcmp("global", str2))
2197 type = PERCENT_PERIOD_GLOBAL;
2198 }
2199
2200 if (!strcmp("hits", str1)) {
2201 if (!strcmp("local", str2))
2202 type = PERCENT_HITS_LOCAL;
2203 else if (!strcmp("global", str2))
2204 type = PERCENT_HITS_GLOBAL;
2205 }
2206
2207 return type;
2208 }
2209
2210 int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
2211 int unset __maybe_unused)
2212 {
2213 unsigned int type;
2214 char *str1, *str2;
2215 int err = -1;
2216
2217 str1 = strdup(_str);
2218 if (!str1)
2219 return -ENOMEM;
2220
2221 str2 = strchr(str1, '-');
2222 if (!str2)
2223 goto out;
2224
2225 *str2++ = 0;
2226
2227 type = parse_percent_type(str1, str2);
2228 if (type == (unsigned int) -1)
2229 type = parse_percent_type(str2, str1);
2230 if (type != (unsigned int) -1) {
2231 annotate_opts.percent_type = type;
2232 err = 0;
2233 }
2234
2235 out:
2236 free(str1);
2237 return err;
2238 }
2239
2240 int annotate_check_args(void)
2241 {
2242 struct annotation_options *args = &annotate_opts;
2243
2244 if (args->prefix_strip && !args->prefix) {
2245 pr_err("--prefix-strip requires --prefix\n");
2246 return -1;
2247 }
2248 return 0;
2249 }
2250
2251 /*
2252 * Get register number and access offset from the given instruction.
2253 * It assumes AT&T x86 asm format like OFFSET(REG).  The format may need
2254 * to be revisited when other architectures are handled.
2255 * Fills the register and offset fields of @op_loc and returns 0 on success.
2256 */
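/*
 * For example (x86, AT&T syntax), parsing "0x18(%rbx)" sets
 * op_loc->offset to 0x18 and op_loc->reg1 to the DWARF register number
 * of %rbx; when multi_regs is set, "(%rbx,%rcx,4)" also fills
 * op_loc->reg2 from %rcx.
 */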
2257 static int extract_reg_offset(struct arch *arch, const char *str,
2258 struct annotated_op_loc *op_loc)
2259 {
2260 char *p;
2261 char *regname;
2262
2263 if (arch->objdump.register_char == 0)
2264 return -1;
2265
2266 /*
2267 * The string should start with the offset, but a zero offset can be
2268 * omitted in the asm, so 0(%rax) is the same as (%rax).
2269 *
2270 * However, it can also start with a segment selector register like
2271 * %gs:0x18(%rbx). In that case, skip that part.
2272 */
2273 if (*str == arch->objdump.register_char) {
2274 if (arch__is(arch, "x86")) {
2275 /* FIXME: Handle other segment registers */
2276 if (!strncmp(str, "%gs:", 4))
2277 op_loc->segment = INSN_SEG_X86_GS;
2278 }
2279
2280 while (*str && !isdigit(*str) &&
2281 *str != arch->objdump.memory_ref_char)
2282 str++;
2283 }
2284
2285 op_loc->offset = strtol(str, &p, 0);
2286
2287 p = strchr(p, arch->objdump.register_char);
2288 if (p == NULL)
2289 return -1;
2290
2291 regname = strdup(p);
2292 if (regname == NULL)
2293 return -1;
2294
2295 op_loc->reg1 = get_dwarf_regnum(regname, 0);
2296 free(regname);
2297
2298 /* Get the second register */
2299 if (op_loc->multi_regs) {
2300 p = strchr(p + 1, arch->objdump.register_char);
2301 if (p == NULL)
2302 return -1;
2303
2304 regname = strdup(p);
2305 if (regname == NULL)
2306 return -1;
2307
2308 op_loc->reg2 = get_dwarf_regnum(regname, 0);
2309 free(regname);
2310 }
2311 return 0;
2312 }
2313
2314 /**
2315 * annotate_get_insn_location - Get location of instruction
2316 * @arch: the architecture info
2317 * @dl: the target instruction
2318 * @loc: a buffer to save the data
2319 *
2320 * Get detailed location info (register and offset) in the instruction.
2321 * It covers both the source and target operands, and whether each accesses
2322 * a memory location. The offset field is meaningful only when the
2323 * corresponding mem flag is set. The reg2 field is meaningful only
2324 * when the multi_regs flag is set.
2325 *
2326 * Some examples on x86:
2327 *
2328 * mov (%rax), %rcx # src_reg1 = rax, src_mem = 1, src_offset = 0
2329 * # dst_reg1 = rcx, dst_mem = 0
2330 *
2331 * mov 0x18, %r8 # src_reg1 = -1, src_mem = 0
2332 * # dst_reg1 = r8, dst_mem = 0
2333 *
2334 * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
2335 * # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
2336 * # dst_multi_regs = 1, dst_offset = 8
2337 */
2338 int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
2339 struct annotated_insn_loc *loc)
2340 {
2341 struct ins_operands *ops;
2342 struct annotated_op_loc *op_loc;
2343 int i;
2344
2345 if (ins__is_lock(&dl->ins))
2346 ops = dl->ops.locked.ops;
2347 else
2348 ops = &dl->ops;
2349
2350 if (ops == NULL)
2351 return -1;
2352
2353 memset(loc, 0, sizeof(*loc));
2354
2355 for_each_insn_op_loc(loc, i, op_loc) {
2356 const char *insn_str = ops->source.raw;
2357 bool multi_regs = ops->source.multi_regs;
2358 bool mem_ref = ops->source.mem_ref;
2359
2360 if (i == INSN_OP_TARGET) {
2361 insn_str = ops->target.raw;
2362 multi_regs = ops->target.multi_regs;
2363 mem_ref = ops->target.mem_ref;
2364 }
2365
2366 /* Invalidate the register by default */
2367 op_loc->reg1 = -1;
2368 op_loc->reg2 = -1;
2369
2370 if (insn_str == NULL) {
2371 if (!arch__is(arch, "powerpc"))
2372 continue;
2373 }
2374
2375 /*
2376 * For powerpc, call the get_powerpc_regs function, which extracts the
2377 * required fields for op_loc (i.e. reg1, reg2 and offset) from the
2378 * raw instruction.
2379 */
2380 if (arch__is(arch, "powerpc")) {
2381 op_loc->mem_ref = mem_ref;
2382 op_loc->multi_regs = multi_regs;
2383 get_powerpc_regs(dl->raw.raw_insn, !i, op_loc);
2384 } else if (strchr(insn_str, arch->objdump.memory_ref_char)) {
2385 op_loc->mem_ref = true;
2386 op_loc->multi_regs = multi_regs;
2387 extract_reg_offset(arch, insn_str, op_loc);
2388 } else {
2389 char *s, *p = NULL;
2390
2391 if (arch__is(arch, "x86")) {
2392 /* FIXME: Handle other segment registers */
2393 if (!strncmp(insn_str, "%gs:", 4)) {
2394 op_loc->segment = INSN_SEG_X86_GS;
2395 op_loc->offset = strtol(insn_str + 4,
2396 &p, 0);
2397 if (p && p != insn_str + 4)
2398 op_loc->imm = true;
2399 continue;
2400 }
2401 }
2402
2403 s = strdup(insn_str);
2404 if (s == NULL)
2405 return -1;
2406
2407 if (*s == arch->objdump.register_char)
2408 op_loc->reg1 = get_dwarf_regnum(s, 0);
2409 else if (*s == arch->objdump.imm_char) {
2410 op_loc->offset = strtol(s + 1, &p, 0);
2411 if (p && p != s + 1)
2412 op_loc->imm = true;
2413 }
2414 free(s);
2415 }
2416 }
2417
2418 return 0;
2419 }
2420
2421 static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
2422 bool allow_update)
2423 {
2424 struct disasm_line *dl;
2425 struct annotation *notes;
2426
2427 notes = symbol__annotation(sym);
2428
2429 list_for_each_entry(dl, &notes->src->source, al.node) {
2430 if (dl->al.offset == -1)
2431 continue;
2432
2433 if (sym->start + dl->al.offset == ip) {
2434 /*
2435 * llvm-objdump places "lock" on a separate line and
2436 * in that case, we want to get the next line.
2437 */
2438 if (ins__is_lock(&dl->ins) &&
2439 *dl->ops.raw == '\0' && allow_update) {
2440 ip++;
2441 continue;
2442 }
2443 return dl;
2444 }
2445 }
2446 return NULL;
2447 }
2448
2449 static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
2450 const char *name)
2451 {
2452 struct annotated_item_stat *istat;
2453
2454 list_for_each_entry(istat, head, list) {
2455 if (!strcmp(istat->name, name))
2456 return istat;
2457 }
2458
2459 istat = zalloc(sizeof(*istat));
2460 if (istat == NULL)
2461 return NULL;
2462
2463 istat->name = strdup(name);
2464 if ((istat->name == NULL) || (!strlen(istat->name))) {
2465 free(istat);
2466 return NULL;
2467 }
2468
2469 list_add_tail(&istat->list, head);
2470 return istat;
2471 }
2472
2473 static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
2474 {
2475 if (arch__is(arch, "x86")) {
2476 if (!strncmp(dl->ins.name, "push", 4) ||
2477 !strncmp(dl->ins.name, "pop", 3) ||
2478 !strncmp(dl->ins.name, "call", 4) ||
2479 !strncmp(dl->ins.name, "ret", 3))
2480 return true;
2481 }
2482
2483 return false;
2484 }
2485
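/*
 * For example, the compiler's stack protector on x86_64 typically emits
 * kernel accesses like "mov %gs:0x28,%rax", where offset 0x28 (40) in
 * the GS segment holds the canary value.
 */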
2486 static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
2487 {
2488 /* On x86_64, %gs:40 is used for stack canary */
2489 if (arch__is(arch, "x86")) {
2490 if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
2491 loc->offset == 40)
2492 return true;
2493 }
2494
2495 return false;
2496 }
2497
2498 static struct disasm_line *
2499 annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
2500 {
2501 struct list_head *sources = &notes->src->source;
2502 struct disasm_line *prev;
2503
2504 if (curr == list_first_entry(sources, struct disasm_line, al.node))
2505 return NULL;
2506
2507 prev = list_prev_entry(curr, al.node);
2508 while (prev->al.offset == -1 &&
2509 prev != list_first_entry(sources, struct disasm_line, al.node))
2510 prev = list_prev_entry(prev, al.node);
2511
2512 if (prev->al.offset == -1)
2513 return NULL;
2514
2515 return prev;
2516 }
2517
2518 static struct disasm_line *
2519 annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
2520 {
2521 struct list_head *sources = &notes->src->source;
2522 struct disasm_line *next;
2523
2524 if (curr == list_last_entry(sources, struct disasm_line, al.node))
2525 return NULL;
2526
2527 next = list_next_entry(curr, al.node);
2528 while (next->al.offset == -1 &&
2529 next != list_last_entry(sources, struct disasm_line, al.node))
2530 next = list_next_entry(next, al.node);
2531
2532 if (next->al.offset == -1)
2533 return NULL;
2534
2535 return next;
2536 }
2537
2538 u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
2539 struct disasm_line *dl)
2540 {
2541 struct annotation *notes;
2542 struct disasm_line *next;
2543 u64 addr;
2544
2545 notes = symbol__annotation(ms->sym);
2546 /*
2547 * PC-relative addressing starts from the next instruction address,
2548 * but the IP is for the current instruction. Since disasm_line
2549 * doesn't have the instruction size, calculate it using the next
2550 * disasm_line. If it's the last one, we can use the symbol's end
2551 * address directly.
2552 */
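/*
 * A sketch with made-up numbers: if @dl is at offset 0x10, the next
 * asm line is at offset 0x17 (i.e. a 7-byte instruction) and the encoded
 * displacement @offset is 0x1234, then the target is ip + 7 + 0x1234,
 * converted below to the objdump address view.
 */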
2553 next = annotation__next_asm_line(notes, dl);
2554 if (next == NULL)
2555 addr = ms->sym->end + offset;
2556 else
2557 addr = ip + (next->al.offset - dl->al.offset) + offset;
2558
2559 return map__rip_2objdump(ms->map, addr);
2560 }
2561
2562 static struct debuginfo_cache {
2563 struct dso *dso;
2564 struct debuginfo *dbg;
2565 } di_cache;
2566
2567 void debuginfo_cache__delete(void)
2568 {
2569 dso__put(di_cache.dso);
2570 di_cache.dso = NULL;
2571
2572 debuginfo__delete(di_cache.dbg);
2573 di_cache.dbg = NULL;
2574 }
2575
2576 /**
2577 * hist_entry__get_data_type - find data type for given hist entry
2578 * @he: hist entry
2579 *
2580 * This function first annotates the instruction at @he->ip and extracts
2581 * register and offset info from it. Then it searches the DWARF debug
2582 * info to find the variable and its type information using the address,
2583 * register, and offset.
2584 */
2585 struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
2586 {
2587 struct map_symbol *ms = &he->ms;
2588 struct evsel *evsel = hists_to_evsel(he->hists);
2589 struct arch *arch;
2590 struct disasm_line *dl;
2591 struct annotated_insn_loc loc;
2592 struct annotated_op_loc *op_loc;
2593 struct annotated_data_type *mem_type;
2594 struct annotated_item_stat *istat;
2595 u64 ip = he->ip;
2596 int i;
2597
2598 ann_data_stat.total++;
2599
2600 if (ms->map == NULL || ms->sym == NULL) {
2601 ann_data_stat.no_sym++;
2602 return NULL;
2603 }
2604
2605 if (!symbol_conf.init_annotation) {
2606 ann_data_stat.no_sym++;
2607 return NULL;
2608 }
2609
2610 /*
2611 * di_cache holds a pair of values, but code below assumes
2612 * di_cache.dso can be compared/updated and di_cache.dbg can be
2613 * read/updated independently from each other. That assumption only
2614 * holds in single threaded code.
2615 */
2616 assert(perf_singlethreaded);
2617
2618 if (map__dso(ms->map) != di_cache.dso) {
2619 dso__put(di_cache.dso);
2620 di_cache.dso = dso__get(map__dso(ms->map));
2621
2622 debuginfo__delete(di_cache.dbg);
2623 di_cache.dbg = debuginfo__new(dso__long_name(di_cache.dso));
2624 }
2625
2626 if (di_cache.dbg == NULL) {
2627 ann_data_stat.no_dbginfo++;
2628 return NULL;
2629 }
2630
2631 /* Make sure it has the disasm of the function */
2632 if (symbol__annotate(ms, evsel, &arch) < 0) {
2633 ann_data_stat.no_insn++;
2634 return NULL;
2635 }
2636
2637 /*
2638 * Get a disasm to extract the location from the insn.
2639 * This is too slow...
2640 */
2641 dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
2642 if (dl == NULL) {
2643 ann_data_stat.no_insn++;
2644 return NULL;
2645 }
2646
2647 retry:
2648 istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
2649 if (istat == NULL) {
2650 ann_data_stat.no_insn++;
2651 return NULL;
2652 }
2653
2654 if (annotate_get_insn_location(arch, dl, &loc) < 0) {
2655 ann_data_stat.no_insn_ops++;
2656 istat->bad++;
2657 return NULL;
2658 }
2659
2660 if (is_stack_operation(arch, dl)) {
2661 istat->good++;
2662 he->mem_type_off = 0;
2663 return &stackop_type;
2664 }
2665
2666 for_each_insn_op_loc(&loc, i, op_loc) {
2667 struct data_loc_info dloc = {
2668 .arch = arch,
2669 .thread = he->thread,
2670 .ms = ms,
2671 /* Recalculate IP for LOCK prefix or insn fusion */
2672 .ip = ms->sym->start + dl->al.offset,
2673 .cpumode = he->cpumode,
2674 .op = op_loc,
2675 .di = di_cache.dbg,
2676 };
2677
2678 if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
2679 continue;
2680
2681 /* Recalculate IP because of LOCK prefix or insn fusion */
2682 ip = ms->sym->start + dl->al.offset;
2683
2684 /* PC-relative addressing */
2685 if (op_loc->reg1 == DWARF_REG_PC) {
2686 dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
2687 op_loc->offset, dl);
2688 }
2689
2690 /* This-CPU (%gs-based) access in the kernel - pretend it uses PC-relative addressing */
2691 if (dso__kernel(map__dso(ms->map)) && arch__is(arch, "x86") &&
2692 op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
2693 dloc.var_addr = op_loc->offset;
2694 op_loc->reg1 = DWARF_REG_PC;
2695 }
2696
2697 mem_type = find_data_type(&dloc);
2698
2699 if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
2700 istat->good++;
2701 he->mem_type_off = 0;
2702 return &canary_type;
2703 }
2704
2705 if (mem_type)
2706 istat->good++;
2707 else
2708 istat->bad++;
2709
2710 if (symbol_conf.annotate_data_sample) {
2711 annotated_data_type__update_samples(mem_type, evsel,
2712 dloc.type_offset,
2713 he->stat.nr_events,
2714 he->stat.period);
2715 }
2716 he->mem_type_off = dloc.type_offset;
2717 return mem_type;
2718 }
2719
2720 /*
2721 * Some instructions can be fused and the actual memory access may come
2722 * from the previous instruction.
2723 */
2724 if (dl->al.offset > 0) {
2725 struct annotation *notes;
2726 struct disasm_line *prev_dl;
2727
2728 notes = symbol__annotation(ms->sym);
2729 prev_dl = annotation__prev_asm_line(notes, dl);
2730
2731 if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
2732 dl = prev_dl;
2733 goto retry;
2734 }
2735 }
2736
2737 ann_data_stat.no_mem_ops++;
2738 istat->bad++;
2739 return NULL;
2740 }
2741
2742 /* Basic block traversal (BFS) data structure */
2743 struct basic_block_data {
2744 struct list_head queue;
2745 struct list_head visited;
2746 };
2747
2748 /*
2749 * During the traversal, it needs to know the parent block where the current
2750 * block started from. Note that a single basic block can be the parent of
2751 * two child basic blocks (in the case of a conditional jump).
2752 */
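/*
 * For example, a conditional jump at the end of block A creates two child
 * blocks, both pointing back to A through their parent link:
 *
 *	        A (conditional jump)
 *	       / \
 *	      B   C
 *	(taken)   (fall through)
 */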
2753 struct basic_block_link {
2754 struct list_head node;
2755 struct basic_block_link *parent;
2756 struct annotated_basic_block *bb;
2757 };
2758
2759 /* Check if any basic block in the list already contains the offset */
2760 static bool basic_block_has_offset(struct list_head *head, s64 offset)
2761 {
2762 struct basic_block_link *link;
2763
2764 list_for_each_entry(link, head, node) {
2765 s64 begin_offset = link->bb->begin->al.offset;
2766 s64 end_offset = link->bb->end->al.offset;
2767
2768 if (begin_offset <= offset && offset <= end_offset)
2769 return true;
2770 }
2771 return false;
2772 }
2773
2774 static bool is_new_basic_block(struct basic_block_data *bb_data,
2775 struct disasm_line *dl)
2776 {
2777 s64 offset = dl->al.offset;
2778
2779 if (basic_block_has_offset(&bb_data->visited, offset))
2780 return false;
2781 if (basic_block_has_offset(&bb_data->queue, offset))
2782 return false;
2783 return true;
2784 }
2785
2786 /* Add a basic block starting from dl and link it to the parent */
2787 static int add_basic_block(struct basic_block_data *bb_data,
2788 struct basic_block_link *parent,
2789 struct disasm_line *dl)
2790 {
2791 struct annotated_basic_block *bb;
2792 struct basic_block_link *link;
2793
2794 if (dl == NULL)
2795 return -1;
2796
2797 if (!is_new_basic_block(bb_data, dl))
2798 return 0;
2799
2800 bb = zalloc(sizeof(*bb));
2801 if (bb == NULL)
2802 return -1;
2803
2804 bb->begin = dl;
2805 bb->end = dl;
2806 INIT_LIST_HEAD(&bb->list);
2807
2808 link = malloc(sizeof(*link));
2809 if (link == NULL) {
2810 free(bb);
2811 return -1;
2812 }
2813
2814 link->bb = bb;
2815 link->parent = parent;
2816 list_add_tail(&link->node, &bb_data->queue);
2817 return 0;
2818 }
2819
2820 /* Returns true when it finds the target in the current basic block */
2821 static bool process_basic_block(struct basic_block_data *bb_data,
2822 struct basic_block_link *link,
2823 struct symbol *sym, u64 target)
2824 {
2825 struct disasm_line *dl, *next_dl, *last_dl;
2826 struct annotation *notes = symbol__annotation(sym);
2827 bool found = false;
2828
2829 dl = link->bb->begin;
2830 /* Check if it's already visited */
2831 if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
2832 return false;
2833
2834 last_dl = list_last_entry(&notes->src->source,
2835 struct disasm_line, al.node);
2836 if (last_dl->al.offset == -1)
2837 last_dl = annotation__prev_asm_line(notes, last_dl);
2838
2839 if (last_dl == NULL)
2840 return false;
2841
2842 list_for_each_entry_from(dl, &notes->src->source, al.node) {
2843 /* Skip comment or debug info line */
2844 if (dl->al.offset == -1)
2845 continue;
2846 /* Found the target instruction */
2847 if (sym->start + dl->al.offset == target) {
2848 found = true;
2849 break;
2850 }
2851 /* End of the function, finish the block */
2852 if (dl == last_dl)
2853 break;
2854 /* 'return' instruction finishes the block */
2855 if (ins__is_ret(&dl->ins))
2856 break;
2857 /* normal instructions are part of the basic block */
2858 if (!ins__is_jump(&dl->ins))
2859 continue;
2860 /* jump to a different function, tail call or return */
2861 if (dl->ops.target.outside)
2862 break;
2863 /* jump instruction creates new basic block(s) */
2864 next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
2865 /*allow_update=*/false);
2866 if (next_dl)
2867 add_basic_block(bb_data, link, next_dl);
2868
2869 /*
2870 * FIXME: determine conditional jumps properly.
2871 * Conditional jumps create another basic block with the
2872 * next disasm line.
2873 */
2874 if (!strstr(dl->ins.name, "jmp")) {
2875 next_dl = annotation__next_asm_line(notes, dl);
2876 if (next_dl)
2877 add_basic_block(bb_data, link, next_dl);
2878 }
2879 break;
2880
2881 }
2882 link->bb->end = dl;
2883 return found;
2884 }
2885
2886 /*
2887 * Once a target basic block is found, build a proper linked list of basic
2888 * blocks by following the parent links recursively.
2889 */
2890 static void link_found_basic_blocks(struct basic_block_link *link,
2891 struct list_head *head)
2892 {
2893 while (link) {
2894 struct basic_block_link *parent = link->parent;
2895
2896 list_move(&link->bb->list, head);
2897 list_del(&link->node);
2898 free(link);
2899
2900 link = parent;
2901 }
2902 }
2903
2904 static void delete_basic_blocks(struct basic_block_data *bb_data)
2905 {
2906 struct basic_block_link *link, *tmp;
2907
2908 list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
2909 list_del(&link->node);
2910 zfree(&link->bb);
2911 free(link);
2912 }
2913
2914 list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
2915 list_del(&link->node);
2916 zfree(&link->bb);
2917 free(link);
2918 }
2919 }
2920
2921 /**
2922 * annotate_get_basic_blocks - Get basic blocks for given address range
2923 * @sym: symbol to annotate
2924 * @src: source address
2925 * @dst: destination address
2926 * @head: list head to save basic blocks
2927 *
2928 * This function traverses disasm_lines from @src to @dst and saves them in a
2929 * list of annotated_basic_block on @head. It uses BFS to find the shortest
2930 * path between the two. The basic_block_link maintains parent links so
2931 * that it can build the list of blocks from the start.
2932 */
2933 int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
2934 struct list_head *head)
2935 {
2936 struct basic_block_data bb_data = {
2937 .queue = LIST_HEAD_INIT(bb_data.queue),
2938 .visited = LIST_HEAD_INIT(bb_data.visited),
2939 };
2940 struct basic_block_link *link;
2941 struct disasm_line *dl;
2942 int ret = -1;
2943
2944 dl = find_disasm_line(sym, src, /*allow_update=*/false);
2945 if (dl == NULL)
2946 return -1;
2947
2948 if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0)
2949 return -1;
2950
2951 /* Find shortest path from src to dst using BFS */
2952 while (!list_empty(&bb_data.queue)) {
2953 link = list_first_entry(&bb_data.queue, struct basic_block_link, node);
2954
2955 if (process_basic_block(&bb_data, link, sym, dst)) {
2956 link_found_basic_blocks(link, head);
2957 ret = 0;
2958 break;
2959 }
2960 list_move(&link->node, &bb_data.visited);
2961 }
2962 delete_basic_blocks(&bb_data);
2963 return ret;
2964 }
2965