xref: /linux/tools/perf/util/annotate.c (revision 25489a4f556414445d342951615178368ee45cde)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4  *
5  * Parts came from builtin-annotate.c, see those files for further
6  * copyright notes.
7  */
8 
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <libgen.h>
12 #include <stdlib.h>
13 #include "util.h" // hex_width()
14 #include "ui/ui.h"
15 #include "sort.h"
16 #include "build-id.h"
17 #include "color.h"
18 #include "config.h"
19 #include "disasm.h"
20 #include "dso.h"
21 #include "env.h"
22 #include "map.h"
23 #include "maps.h"
24 #include "symbol.h"
25 #include "srcline.h"
26 #include "units.h"
27 #include "debug.h"
28 #include "debuginfo.h"
29 #include "annotate.h"
30 #include "annotate-data.h"
31 #include "evsel.h"
32 #include "evlist.h"
33 #include "bpf-event.h"
34 #include "bpf-utils.h"
35 #include "block-range.h"
36 #include "string2.h"
37 #include "dwarf-regs.h"
38 #include "util/event.h"
39 #include "util/sharded_mutex.h"
40 #include "arch/common.h"
41 #include "namespaces.h"
42 #include "thread.h"
43 #include "hashmap.h"
44 #include "strbuf.h"
45 #include <regex.h>
46 #include <linux/bitops.h>
47 #include <linux/kernel.h>
48 #include <linux/string.h>
49 #include <linux/zalloc.h>
50 #include <subcmd/parse-options.h>
51 #include <subcmd/run-command.h>
52 #include <math.h>
53 
54 /* FIXME: For the HE_COLORSET */
55 #include "ui/browser.h"
56 
57 /*
58  * FIXME: Using the same values as slang.h,
59  * but that header may not be available everywhere
60  */
61 #define LARROW_CHAR	((unsigned char)',')
62 #define RARROW_CHAR	((unsigned char)'+')
63 #define DARROW_CHAR	((unsigned char)'.')
64 #define UARROW_CHAR	((unsigned char)'-')
65 
66 #include <linux/ctype.h>
67 
68 /* global annotation options */
69 struct annotation_options annotate_opts;
70 
71 /* Data type collection debug statistics */
72 struct annotated_data_stat ann_data_stat;
73 LIST_HEAD(ann_insn_stat);
74 
75 /* Pseudo data types */
76 struct annotated_data_type stackop_type = {
77 	.self = {
78 		.type_name = (char *)"(stack operation)",
79 		.children = LIST_HEAD_INIT(stackop_type.self.children),
80 	},
81 };
82 
83 struct annotated_data_type canary_type = {
84 	.self = {
85 		.type_name = (char *)"(stack canary)",
86 		.children = LIST_HEAD_INIT(canary_type.self.children),
87 	},
88 };
89 
90 #define NO_TYPE ((struct annotated_data_type *)-1UL)
91 
92 /* symbol histogram: key = offset << 16 | evsel->core.idx */
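/*
 * For example, a sample at offset 0x40 for the event at index 2 packs into
 * key (0x40 << 16) | 2 == 0x400002; the hash simply folds the two halves.
 */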
93 static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
94 {
95 	return (key >> 16) + (key & 0xffff);
96 }
97 
98 static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
99 {
100 	return key1 == key2;
101 }
102 
103 static struct annotated_source *annotated_source__new(void)
104 {
105 	struct annotated_source *src = zalloc(sizeof(*src));
106 
107 	if (src != NULL)
108 		INIT_LIST_HEAD(&src->source);
109 
110 	return src;
111 }
112 
113 static __maybe_unused void annotated_source__delete(struct annotated_source *src)
114 {
115 	struct hashmap_entry *cur;
116 	size_t bkt;
117 
118 	if (src == NULL)
119 		return;
120 
121 	if (src->samples) {
122 		hashmap__for_each_entry(src->samples, cur, bkt)
123 			zfree(&cur->pvalue);
124 		hashmap__free(src->samples);
125 	}
126 	zfree(&src->histograms);
127 	free(src);
128 }
129 
130 static int annotated_source__alloc_histograms(struct annotated_source *src,
131 					      int nr_hists)
132 {
133 	src->nr_histograms   = nr_hists;
134 	src->histograms	     = calloc(nr_hists, sizeof(*src->histograms));
135 
136 	if (src->histograms == NULL)
137 		return -1;
138 
139 	src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
140 	if (src->samples == NULL)
141 		zfree(&src->histograms);
142 
143 	return src->histograms ? 0 : -1;
144 }
145 
146 void symbol__annotate_zero_histograms(struct symbol *sym)
147 {
148 	struct annotation *notes = symbol__annotation(sym);
149 
150 	annotation__lock(notes);
151 	if (notes->src != NULL) {
152 		memset(notes->src->histograms, 0,
153 		       notes->src->nr_histograms * sizeof(*notes->src->histograms));
154 		hashmap__clear(notes->src->samples);
155 	}
156 	if (notes->branch && notes->branch->cycles_hist) {
157 		memset(notes->branch->cycles_hist, 0,
158 		       symbol__size(sym) * sizeof(struct cyc_hist));
159 	}
160 	annotation__unlock(notes);
161 }
162 
163 static int __symbol__account_cycles(struct cyc_hist *ch,
164 				    u64 start,
165 				    unsigned offset, unsigned cycles,
166 				    unsigned have_start)
167 {
168 	/*
169 	 * For now we can only account one basic block per
170 	 * final jump. But multiple could be overlapping.
171 	 * Always account the longest one. So when
172 	 * a shorter one has already been seen, throw it away.
173 	 *
174 	 * We always account the full cycles separately.
175 	 */
176 	ch[offset].num_aggr++;
177 	ch[offset].cycles_aggr += cycles;
178 
179 	if (cycles > ch[offset].cycles_max)
180 		ch[offset].cycles_max = cycles;
181 
182 	if (ch[offset].cycles_min) {
183 		if (cycles && cycles < ch[offset].cycles_min)
184 			ch[offset].cycles_min = cycles;
185 	} else
186 		ch[offset].cycles_min = cycles;
187 
188 	if (!have_start && ch[offset].have_start)
189 		return 0;
190 	if (ch[offset].num) {
191 		if (have_start && (!ch[offset].have_start ||
192 				   ch[offset].start > start)) {
193 			ch[offset].have_start = 0;
194 			ch[offset].cycles = 0;
195 			ch[offset].num = 0;
196 			if (ch[offset].reset < 0xffff)
197 				ch[offset].reset++;
198 		} else if (have_start &&
199 			   ch[offset].start < start)
200 			return 0;
201 	}
202 
203 	if (ch[offset].num < NUM_SPARKS)
204 		ch[offset].cycles_spark[ch[offset].num] = cycles;
205 
206 	ch[offset].have_start = have_start;
207 	ch[offset].start = start;
208 	ch[offset].cycles += cycles;
209 	ch[offset].num++;
210 	return 0;
211 }
212 
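/*
 * Record one sample for @addr: bump the per-event totals in the symbol
 * histogram and the per-(offset, event) entry in the samples hashmap.
 * Returns -ERANGE when @addr falls outside the symbol.
 */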
213 static int __symbol__inc_addr_samples(struct map_symbol *ms,
214 				      struct annotated_source *src, struct evsel *evsel, u64 addr,
215 				      struct perf_sample *sample)
216 {
217 	struct symbol *sym = ms->sym;
218 	long hash_key;
219 	u64 offset;
220 	struct sym_hist *h;
221 	struct sym_hist_entry *entry;
222 
223 	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
224 
225 	if ((addr < sym->start || addr >= sym->end) &&
226 	    (addr != sym->end || sym->start != sym->end)) {
227 		pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
228 		       __func__, __LINE__, sym->name, sym->start, addr, sym->end);
229 		return -ERANGE;
230 	}
231 
232 	offset = addr - sym->start;
233 	h = annotated_source__histogram(src, evsel);
234 	if (h == NULL) {
235 		pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
236 			 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
237 		return -ENOMEM;
238 	}
239 
240 	hash_key = offset << 16 | evsel->core.idx;
241 	if (!hashmap__find(src->samples, hash_key, &entry)) {
242 		entry = zalloc(sizeof(*entry));
243 		if (entry == NULL)
244 			return -ENOMEM;
245 
246 		if (hashmap__add(src->samples, hash_key, entry) < 0)
247 			return -ENOMEM;
248 	}
249 
250 	h->nr_samples++;
251 	h->period += sample->period;
252 	entry->nr_samples++;
253 	entry->period += sample->period;
254 
255 	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
256 		  ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
257 		  sym->start, sym->name, addr, addr - sym->start, evsel->core.idx,
258 		  entry->nr_samples, entry->period);
259 	return 0;
260 }
261 
262 struct annotated_branch *annotation__get_branch(struct annotation *notes)
263 {
264 	if (notes == NULL)
265 		return NULL;
266 
267 	if (notes->branch == NULL)
268 		notes->branch = zalloc(sizeof(*notes->branch));
269 
270 	return notes->branch;
271 }
272 
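/*
 * Lazily allocate the branch data for @sym: one struct cyc_hist per byte of
 * the symbol and, when branch counters are in use, @br_cntr_nr counters per
 * byte.
 */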
273 static struct annotated_branch *symbol__find_branch_hist(struct symbol *sym,
274 							 unsigned int br_cntr_nr)
275 {
276 	struct annotation *notes = symbol__annotation(sym);
277 	struct annotated_branch *branch;
278 	const size_t size = symbol__size(sym);
279 
280 	branch = annotation__get_branch(notes);
281 	if (branch == NULL)
282 		return NULL;
283 
284 	if (branch->cycles_hist == NULL) {
285 		branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
286 		if (!branch->cycles_hist)
287 			return NULL;
288 	}
289 
290 	if (br_cntr_nr && branch->br_cntr == NULL) {
291 		branch->br_cntr = calloc(br_cntr_nr * size, sizeof(u64));
292 		if (!branch->br_cntr)
293 			return NULL;
294 	}
295 
296 	return branch;
297 }
298 
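/*
 * Get the annotated source for @sym, creating it and allocating one
 * histogram per event (@nr_hists) on first use.
 */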
299 struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
300 {
301 	struct annotation *notes = symbol__annotation(sym);
302 
303 	if (notes->src == NULL) {
304 		notes->src = annotated_source__new();
305 		if (notes->src == NULL)
306 			return NULL;
307 		goto alloc_histograms;
308 	}
309 
310 	if (notes->src->histograms == NULL) {
311 alloc_histograms:
312 		annotated_source__alloc_histograms(notes->src, nr_hists);
313 	}
314 
315 	return notes->src;
316 }
317 
318 static int symbol__inc_addr_samples(struct map_symbol *ms,
319 				    struct evsel *evsel, u64 addr,
320 				    struct perf_sample *sample)
321 {
322 	struct symbol *sym = ms->sym;
323 	struct annotated_source *src;
324 
325 	if (sym == NULL)
326 		return 0;
327 	src = symbol__hists(sym, evsel->evlist->core.nr_entries);
328 	return src ? __symbol__inc_addr_samples(ms, src, evsel, addr, sample) : 0;
329 }
330 
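/*
 * Accumulate the logged branch counters for this offset: @br_cntr holds one
 * fixed-width field per counter (the width comes from the perf env), and a
 * field at its maximum value is marked as saturated.
 */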
331 static int symbol__account_br_cntr(struct annotated_branch *branch,
332 				   struct evsel *evsel,
333 				   unsigned offset,
334 				   u64 br_cntr)
335 {
336 	unsigned int br_cntr_nr = evsel__leader(evsel)->br_cntr_nr;
337 	unsigned int base = evsel__leader(evsel)->br_cntr_idx;
338 	unsigned int off = offset * evsel->evlist->nr_br_cntr;
339 	u64 *branch_br_cntr = branch->br_cntr;
340 	unsigned int i, mask, width;
341 
342 	if (!br_cntr || !branch_br_cntr)
343 		return 0;
344 
345 	perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width);
346 	mask = (1L << width) - 1;
347 	for (i = 0; i < br_cntr_nr; i++) {
348 		u64 cntr = (br_cntr >> i * width) & mask;
349 
350 		branch_br_cntr[off + i + base] += cntr;
351 		if (cntr == mask)
352 			branch_br_cntr[off + i + base] |= ANNOTATION__BR_CNTR_SATURATED_FLAG;
353 	}
354 
355 	return 0;
356 }
357 
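/*
 * Account @cycles for the basic block ending at @addr (and starting at
 * @start when known), after checking that both fall inside @sym.  Branch
 * counter values, if any, are accounted as well.
 */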
358 static int symbol__account_cycles(u64 addr, u64 start, struct symbol *sym,
359 				  unsigned cycles, struct evsel *evsel,
360 				  u64 br_cntr)
361 {
362 	struct annotated_branch *branch;
363 	unsigned offset;
364 	int ret;
365 
366 	if (sym == NULL)
367 		return 0;
368 	branch = symbol__find_branch_hist(sym, evsel->evlist->nr_br_cntr);
369 	if (!branch)
370 		return -ENOMEM;
371 	if (addr < sym->start || addr >= sym->end)
372 		return -ERANGE;
373 
374 	if (start) {
375 		if (start < sym->start || start >= sym->end)
376 			return -ERANGE;
377 		if (start >= addr)
378 			start = 0;
379 	}
380 	offset = addr - sym->start;
381 	ret = __symbol__account_cycles(branch->cycles_hist,
382 					start ? start - sym->start : 0,
383 					offset, cycles,
384 					!!start);
385 
386 	if (ret)
387 		return ret;
388 
389 	return symbol__account_br_cntr(branch, evsel, offset, br_cntr);
390 }
391 
392 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
393 				    struct addr_map_symbol *start,
394 				    unsigned cycles,
395 				    struct evsel *evsel,
396 				    u64 br_cntr)
397 {
398 	u64 saddr = 0;
399 	int err;
400 
401 	if (!cycles)
402 		return 0;
403 
404 	/*
405 	 * Only set start when IPC can be computed. We can only
406 	 * compute it when the basic block is completely in a single
407 	 * function.
408 	 * Special-case the situation where the jump comes from elsewhere,
409 	 * but the block starts at the function start.
410 	 */
411 	if (start &&
412 		(start->ms.sym == ams->ms.sym ||
413 		 (ams->ms.sym &&
414 		  start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
415 		saddr = start->al_addr;
416 	if (saddr == 0)
417 		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
418 			ams->addr,
419 			start ? start->addr : 0,
420 			ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
421 			saddr);
422 	err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles, evsel, br_cntr);
423 	if (err)
424 		pr_debug2("account_cycles failed %d\n", err);
425 	return err;
426 }
427 
428 struct annotation_line *annotated_source__get_line(struct annotated_source *src,
429 						   s64 offset)
430 {
431 	struct annotation_line *al;
432 
433 	list_for_each_entry(al, &src->source, node) {
434 		if (al->offset == offset)
435 			return al;
436 	}
437 	return NULL;
438 }
439 
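/*
 * Count the disassembled instructions whose offsets fall in [start, end].
 */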
440 static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
441 {
442 	struct annotation_line *al;
443 	unsigned n_insn = 0;
444 
445 	al = annotated_source__get_line(notes->src, start);
446 	if (al == NULL)
447 		return 0;
448 
449 	list_for_each_entry_from(al, &notes->src->source, node) {
450 		if (al->offset == -1)
451 			continue;
452 		if ((u64)al->offset > end)
453 			break;
454 		n_insn++;
455 	}
456 	return n_insn;
457 }
458 
459 static void annotated_branch__delete(struct annotated_branch *branch)
460 {
461 	if (branch) {
462 		zfree(&branch->cycles_hist);
463 		free(branch->br_cntr);
464 		free(branch);
465 	}
466 }
467 
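/*
 * Compute the IPC of the basic block [start, end] from its cycle histogram
 * entry and stamp it on every covered instruction that does not have one
 * yet, updating the branch coverage statistics along the way.
 */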
468 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
469 {
470 	unsigned n_insn;
471 	unsigned int cover_insn = 0;
472 
473 	n_insn = annotation__count_insn(notes, start, end);
474 	if (n_insn && ch->num && ch->cycles) {
475 		struct annotation_line *al;
476 		struct annotated_branch *branch;
477 		float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
478 
479 		/* Hide data when there are too many overlaps. */
480 		if (ch->reset >= 0x7fff)
481 			return;
482 
483 		al = annotated_source__get_line(notes->src, start);
484 		if (al == NULL)
485 			return;
486 
487 		list_for_each_entry_from(al, &notes->src->source, node) {
488 			if (al->offset == -1)
489 				continue;
490 			if ((u64)al->offset > end)
491 				break;
492 			if (al->cycles && al->cycles->ipc == 0.0) {
493 				al->cycles->ipc = ipc;
494 				cover_insn++;
495 			}
496 		}
497 
498 		branch = annotation__get_branch(notes);
499 		if (cover_insn && branch) {
500 			branch->hit_cycles += ch->cycles;
501 			branch->hit_insn += n_insn * ch->num;
502 			branch->cover_insn += cover_insn;
503 		}
504 	}
505 }
506 
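/*
 * Walk the cycle histogram backwards and attach IPC, average/min/max cycles
 * and branch counter data to the corresponding annotation lines.  On ENOMEM
 * the partially filled lines are freed again.
 */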
507 static int annotation__compute_ipc(struct annotation *notes, size_t size,
508 				   struct evsel *evsel)
509 {
510 	unsigned int br_cntr_nr = evsel->evlist->nr_br_cntr;
511 	int err = 0;
512 	s64 offset;
513 
514 	if (!notes->branch || !notes->branch->cycles_hist)
515 		return 0;
516 
517 	notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
518 	notes->branch->hit_cycles = 0;
519 	notes->branch->hit_insn = 0;
520 	notes->branch->cover_insn = 0;
521 
522 	annotation__lock(notes);
523 	for (offset = size - 1; offset >= 0; --offset) {
524 		struct cyc_hist *ch;
525 
526 		ch = &notes->branch->cycles_hist[offset];
527 		if (ch && ch->cycles) {
528 			struct annotation_line *al;
529 
530 			al = annotated_source__get_line(notes->src, offset);
531 			if (al && al->cycles == NULL) {
532 				al->cycles = zalloc(sizeof(*al->cycles));
533 				if (al->cycles == NULL) {
534 					err = ENOMEM;
535 					break;
536 				}
537 			}
538 			if (ch->have_start)
539 				annotation__count_and_fill(notes, ch->start, offset, ch);
540 			if (al && ch->num_aggr) {
541 				al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
542 				al->cycles->max = ch->cycles_max;
543 				al->cycles->min = ch->cycles_min;
544 			}
545 			if (al && notes->branch->br_cntr) {
546 				if (!al->br_cntr) {
547 					al->br_cntr = calloc(br_cntr_nr, sizeof(u64));
548 					if (!al->br_cntr) {
549 						err = ENOMEM;
550 						break;
551 					}
552 				}
553 				al->num_aggr = ch->num_aggr;
554 				al->br_cntr_nr = br_cntr_nr;
555 				al->evsel = evsel;
556 				memcpy(al->br_cntr, &notes->branch->br_cntr[offset * br_cntr_nr],
557 				       br_cntr_nr * sizeof(u64));
558 			}
559 		}
560 	}
561 
562 	if (err) {
563 		while (++offset < (s64)size) {
564 			struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
565 
566 			if (ch && ch->cycles) {
567 				struct annotation_line *al;
568 
569 				al = annotated_source__get_line(notes->src, offset);
570 				if (al) {
571 					zfree(&al->cycles);
572 					zfree(&al->br_cntr);
573 				}
574 			}
575 		}
576 	}
577 
578 	annotation__unlock(notes);
579 	return 0;
580 }
581 
582 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
583 				 struct evsel *evsel)
584 {
585 	return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
586 }
587 
588 int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
589 				 struct evsel *evsel, u64 ip)
590 {
591 	return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
592 }
593 
594 
595 void annotation__exit(struct annotation *notes)
596 {
597 	annotated_source__delete(notes->src);
598 	annotated_branch__delete(notes->branch);
599 }
600 
601 static struct sharded_mutex *sharded_mutex;
602 
603 static void annotation__init_sharded_mutex(void)
604 {
605 	/* As many mutexes as there are CPUs. */
606 	sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
607 }
608 
609 static size_t annotation__hash(const struct annotation *notes)
610 {
611 	return (size_t)notes;
612 }
613 
614 static struct mutex *annotation__get_mutex(const struct annotation *notes)
615 {
616 	static pthread_once_t once = PTHREAD_ONCE_INIT;
617 
618 	pthread_once(&once, annotation__init_sharded_mutex);
619 	if (!sharded_mutex)
620 		return NULL;
621 
622 	return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
623 }
624 
625 void annotation__lock(struct annotation *notes)
626 	NO_THREAD_SAFETY_ANALYSIS
627 {
628 	struct mutex *mutex = annotation__get_mutex(notes);
629 
630 	if (mutex)
631 		mutex_lock(mutex);
632 }
633 
634 void annotation__unlock(struct annotation *notes)
635 	NO_THREAD_SAFETY_ANALYSIS
636 {
637 	struct mutex *mutex = annotation__get_mutex(notes);
638 
639 	if (mutex)
640 		mutex_unlock(mutex);
641 }
642 
643 bool annotation__trylock(struct annotation *notes)
644 {
645 	struct mutex *mutex = annotation__get_mutex(notes);
646 
647 	if (!mutex)
648 		return false;
649 
650 	return mutex_trylock(mutex);
651 }
652 
653 void annotation_line__add(struct annotation_line *al, struct list_head *head)
654 {
655 	list_add_tail(&al->node, head);
656 }
657 
658 struct annotation_line *
659 annotation_line__next(struct annotation_line *pos, struct list_head *head)
660 {
661 	list_for_each_entry_continue(pos, head, node)
662 		if (pos->offset >= 0)
663 			return pos;
664 
665 	return NULL;
666 }
667 
668 static const char *annotate__address_color(struct block_range *br)
669 {
670 	double cov = block_range__coverage(br);
671 
672 	if (cov >= 0) {
673 		/* mark red for >75% coverage */
674 		if (cov > 0.75)
675 			return PERF_COLOR_RED;
676 
677 		/* mark dull for <1% coverage */
678 		if (cov < 0.01)
679 			return PERF_COLOR_NORMAL;
680 	}
681 
682 	return PERF_COLOR_MAGENTA;
683 }
684 
685 static const char *annotate__asm_color(struct block_range *br)
686 {
687 	double cov = block_range__coverage(br);
688 
689 	if (cov >= 0) {
690 		/* mark dull for <1% coverage */
691 		if (cov < 0.01)
692 			return PERF_COLOR_NORMAL;
693 	}
694 
695 	return PERF_COLOR_BLUE;
696 }
697 
698 static void annotate__branch_printf(struct block_range *br, u64 addr)
699 {
700 	bool emit_comment = true;
701 
702 	if (!br)
703 		return;
704 
705 #if 1
706 	if (br->is_target && br->start == addr) {
707 		struct block_range *branch = br;
708 		double p;
709 
710 		/*
711 		 * Find matching branch to our target.
712 		 */
713 		while (!branch->is_branch)
714 			branch = block_range__next(branch);
715 
716 		p = 100 *(double)br->entry / branch->coverage;
717 
718 		if (p > 0.1) {
719 			if (emit_comment) {
720 				emit_comment = false;
721 				printf("\t#");
722 			}
723 
724 			/*
725 			 * The percentage of coverage joined at this target in relation
726 			 * to the next branch.
727 			 */
728 			printf(" +%.2f%%", p);
729 		}
730 	}
731 #endif
732 	if (br->is_branch && br->end == addr) {
733 		double p = 100*(double)br->taken / br->coverage;
734 
735 		if (p > 0.1) {
736 			if (emit_comment) {
737 				emit_comment = false;
738 				printf("\t#");
739 			}
740 
741 			/*
742 			 * The percentage of coverage leaving at this branch, and
743 			 * its prediction ratio.
744 			 */
745 			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred  / br->taken);
746 		}
747 	}
748 }
749 
750 static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
751 {
752 	s64 offset = dl->al.offset;
753 	const u64 addr = start + offset;
754 	struct block_range *br;
755 
756 	br = block_range__find(addr);
757 	color_fprintf(stdout, annotate__address_color(br), "  %*" PRIx64 ":", addr_fmt_width, addr);
758 	color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
759 	annotate__branch_printf(br, addr);
760 	return 0;
761 }
762 
763 static struct annotated_data_type *
764 __hist_entry__get_data_type(struct hist_entry *he, struct arch *arch,
765 			    struct debuginfo *dbg, struct disasm_line *dl,
766 			    int *type_offset);
767 
768 struct annotation_print_data {
769 	struct hist_entry *he;
770 	struct evsel *evsel;
771 	struct arch *arch;
772 	struct debuginfo *dbg;
773 	u64 start;
774 	int addr_fmt_width;
775 };
776 
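/*
 * Print a single annotation line with its percent/period columns, flushing
 * queued context lines first.  Returns 1 once opts->max_lines has been
 * printed and -1 when the line is filtered out by opts->min_pcnt.
 */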
777 static int
778 annotation_line__print(struct annotation_line *al, struct annotation_print_data *apd,
779 		       struct annotation_options *opts, int printed,
780 		       struct annotation_line *queue)
781 {
782 	struct symbol *sym = apd->he->ms.sym;
783 	struct disasm_line *dl = container_of(al, struct disasm_line, al);
784 	struct annotation *notes = symbol__annotation(sym);
785 	static const char *prev_line;
786 	int max_lines = opts->max_lines;
787 	int percent_type = opts->percent_type;
788 
789 	if (al->offset != -1) {
790 		double max_percent = 0.0;
791 		int i, nr_percent = 1;
792 		const char *color;
793 
794 		for (i = 0; i < al->data_nr; i++) {
795 			double percent;
796 
797 			percent = annotation_data__percent(&al->data[i],
798 							   percent_type);
799 
800 			if (percent > max_percent)
801 				max_percent = percent;
802 		}
803 
804 		if (al->data_nr > nr_percent)
805 			nr_percent = al->data_nr;
806 
807 		if (max_percent < opts->min_pcnt)
808 			return -1;
809 
810 		if (max_lines && printed >= max_lines)
811 			return 1;
812 
813 		if (queue != NULL) {
814 			struct annotation_options queue_opts = {
815 				.max_lines = 1,
816 				.percent_type = percent_type,
817 			};
818 
819 			list_for_each_entry_from(queue, &notes->src->source, node) {
820 				if (queue == al)
821 					break;
822 				annotation_line__print(queue, apd, &queue_opts,
823 						       /*printed=*/0, /*queue=*/NULL);
824 			}
825 		}
826 
827 		color = get_percent_color(max_percent);
828 
829 		for (i = 0; i < nr_percent; i++) {
830 			struct annotation_data *data = &al->data[i];
831 			double percent;
832 
833 			percent = annotation_data__percent(data, percent_type);
834 			color = get_percent_color(percent);
835 
836 			if (symbol_conf.show_total_period)
837 				color_fprintf(stdout, color, " %11" PRIu64,
838 					      data->he.period);
839 			else if (symbol_conf.show_nr_samples)
840 				color_fprintf(stdout, color, " %7" PRIu64,
841 					      data->he.nr_samples);
842 			else
843 				color_fprintf(stdout, color, " %7.2f", percent);
844 		}
845 
846 		printf(" : ");
847 
848 		disasm_line__print(dl, apd->start, apd->addr_fmt_width);
849 
850 		if (opts->code_with_type && apd->dbg) {
851 			struct annotated_data_type *data_type;
852 			int offset = 0;
853 
854 			data_type = __hist_entry__get_data_type(apd->he, apd->arch,
855 								apd->dbg, dl, &offset);
856 			if (data_type && data_type != NO_TYPE) {
857 				char buf[4096];
858 
859 				printf("\t\t# data-type: %s",
860 				       data_type->self.type_name);
861 
862 				if (data_type != &stackop_type &&
863 				    data_type != &canary_type)
864 					printf(" +%#x", offset);
865 
866 				if (annotated_data_type__get_member_name(data_type,
867 									 buf,
868 									 sizeof(buf),
869 									 offset))
870 					printf(" (%s)", buf);
871 			}
872 		}
873 
874 		/*
875 		 * Also color the filename and line if needed, with
876 		 * the same color as the percentage. Don't print it
877 		 * twice for consecutive addresses with the same filename:line
878 		 */
879 		if (al->path) {
880 			if (!prev_line || strcmp(prev_line, al->path)) {
881 				color_fprintf(stdout, color, " // %s", al->path);
882 				prev_line = al->path;
883 			}
884 		}
885 
886 		printf("\n");
887 	} else if (max_lines && printed >= max_lines)
888 		return 1;
889 	else {
890 		int width = annotation__pcnt_width(notes);
891 
892 		if (queue)
893 			return -1;
894 
895 		if (!*al->line)
896 			printf(" %*s:\n", width, " ");
897 		else
898 			printf(" %*s: %-*d %s\n", width, " ", apd->addr_fmt_width,
899 			       al->line_nr, al->line);
900 	}
901 
902 	return 0;
903 }
904 
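/*
 * Sum the samples and period hitting [offset, end) and convert them into
 * the four percent flavours: hits/period, local (this symbol) and global
 * (all non-filtered samples / total period).
 */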
905 static void calc_percent(struct annotation *notes,
906 			 struct evsel *evsel,
907 			 struct annotation_data *data,
908 			 s64 offset, s64 end)
909 {
910 	struct hists *hists = evsel__hists(evsel);
911 	struct sym_hist *sym_hist = annotation__histogram(notes, evsel);
912 	unsigned int hits = 0;
913 	u64 period = 0;
914 
915 	while (offset < end) {
916 		struct sym_hist_entry *entry;
917 
918 		entry = annotated_source__hist_entry(notes->src, evsel, offset);
919 		if (entry) {
920 			hits   += entry->nr_samples;
921 			period += entry->period;
922 		}
923 		++offset;
924 	}
925 
926 	if (sym_hist->nr_samples) {
927 		data->he.period     = period;
928 		data->he.nr_samples = hits;
929 		data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
930 	}
931 
932 	if (hists->stats.nr_non_filtered_samples)
933 		data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
934 
935 	if (sym_hist->period)
936 		data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
937 
938 	if (hists->stats.total_period)
939 		data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
940 }
941 
942 static void annotation__calc_percent(struct annotation *notes,
943 				     struct evsel *leader, s64 len)
944 {
945 	struct annotation_line *al, *next;
946 	struct evsel *evsel;
947 
948 	list_for_each_entry(al, &notes->src->source, node) {
949 		s64 end;
950 		int i = 0;
951 
952 		if (al->offset == -1)
953 			continue;
954 
955 		next = annotation_line__next(al, &notes->src->source);
956 		end  = next ? next->offset : len;
957 
958 		for_each_group_evsel(evsel, leader) {
959 			struct annotation_data *data;
960 
961 			BUG_ON(i >= al->data_nr);
962 
963 			if (symbol_conf.skip_empty &&
964 			    evsel__hists(evsel)->stats.nr_samples == 0)
965 				continue;
966 
967 			data = &al->data[i++];
968 
969 			calc_percent(notes, evsel, data, al->offset, end);
970 		}
971 	}
972 }
973 
974 void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
975 {
976 	struct annotation *notes = symbol__annotation(sym);
977 
978 	annotation__calc_percent(notes, evsel, symbol__size(sym));
979 }
980 
981 static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
982 {
983 	struct perf_env *env = evsel__env(evsel);
984 	const char *arch_name = perf_env__arch(env);
985 	struct arch *arch;
986 	int err;
987 
988 	if (!arch_name) {
989 		*parch = NULL;
990 		return errno;
991 	}
992 
993 	*parch = arch = arch__find(arch_name);
994 	if (arch == NULL) {
995 		pr_err("%s: unsupported arch %s\n", __func__, arch_name);
996 		return ENOTSUP;
997 	}
998 
999 	if (arch->init) {
1000 		err = arch->init(arch, env ? env->cpuid : NULL);
1001 		if (err) {
1002 			pr_err("%s: failed to initialize %s arch priv area\n",
1003 			       __func__, arch->name);
1004 			return err;
1005 		}
1006 	}
1007 	return 0;
1008 }
1009 
1010 int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
1011 		     struct arch **parch)
1012 {
1013 	struct symbol *sym = ms->sym;
1014 	struct annotation *notes = symbol__annotation(sym);
1015 	struct annotate_args args = {
1016 		.evsel		= evsel,
1017 		.options	= &annotate_opts,
1018 	};
1019 	struct arch *arch = NULL;
1020 	int err, nr;
1021 
1022 	err = evsel__get_arch(evsel, &arch);
1023 	if (err < 0)
1024 		return err;
1025 
1026 	if (parch)
1027 		*parch = arch;
1028 
1029 	if (notes->src && !list_empty(&notes->src->source))
1030 		return 0;
1031 
1032 	args.arch = arch;
1033 	args.ms = *ms;
1034 
1035 	if (notes->src == NULL) {
1036 		notes->src = annotated_source__new();
1037 		if (notes->src == NULL)
1038 			return -1;
1039 	}
1040 
1041 	nr = 0;
1042 	if (evsel__is_group_event(evsel)) {
1043 		struct evsel *pos;
1044 
1045 		for_each_group_evsel(pos, evsel) {
1046 			if (symbol_conf.skip_empty &&
1047 			    evsel__hists(pos)->stats.nr_samples == 0)
1048 				continue;
1049 			nr++;
1050 		}
1051 	}
1052 	notes->src->nr_events = nr ? nr : 1;
1053 
1054 	if (annotate_opts.full_addr)
1055 		notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1056 	else
1057 		notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1058 
1059 	return symbol__disassemble(sym, &args);
1060 }
1061 
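/*
 * Insert @al into the rb-tree keyed by source path.  If the path is already
 * present, its per-event percentages are added to the existing node instead.
 */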
1062 static void insert_source_line(struct rb_root *root, struct annotation_line *al)
1063 {
1064 	struct annotation_line *iter;
1065 	struct rb_node **p = &root->rb_node;
1066 	struct rb_node *parent = NULL;
1067 	unsigned int percent_type = annotate_opts.percent_type;
1068 	int i, ret;
1069 
1070 	while (*p != NULL) {
1071 		parent = *p;
1072 		iter = rb_entry(parent, struct annotation_line, rb_node);
1073 
1074 		ret = strcmp(iter->path, al->path);
1075 		if (ret == 0) {
1076 			for (i = 0; i < al->data_nr; i++) {
1077 				iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
1078 										      percent_type);
1079 			}
1080 			return;
1081 		}
1082 
1083 		if (ret < 0)
1084 			p = &(*p)->rb_left;
1085 		else
1086 			p = &(*p)->rb_right;
1087 	}
1088 
1089 	for (i = 0; i < al->data_nr; i++) {
1090 		al->data[i].percent_sum = annotation_data__percent(&al->data[i],
1091 								   percent_type);
1092 	}
1093 
1094 	rb_link_node(&al->rb_node, parent, p);
1095 	rb_insert_color(&al->rb_node, root);
1096 }
1097 
1098 static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
1099 {
1100 	int i;
1101 
1102 	for (i = 0; i < a->data_nr; i++) {
1103 		if (a->data[i].percent_sum == b->data[i].percent_sum)
1104 			continue;
1105 		return a->data[i].percent_sum > b->data[i].percent_sum;
1106 	}
1107 
1108 	return 0;
1109 }
1110 
1111 static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
1112 {
1113 	struct annotation_line *iter;
1114 	struct rb_node **p = &root->rb_node;
1115 	struct rb_node *parent = NULL;
1116 
1117 	while (*p != NULL) {
1118 		parent = *p;
1119 		iter = rb_entry(parent, struct annotation_line, rb_node);
1120 
1121 		if (cmp_source_line(al, iter))
1122 			p = &(*p)->rb_left;
1123 		else
1124 			p = &(*p)->rb_right;
1125 	}
1126 
1127 	rb_link_node(&al->rb_node, parent, p);
1128 	rb_insert_color(&al->rb_node, root);
1129 }
1130 
1131 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1132 {
1133 	struct annotation_line *al;
1134 	struct rb_node *node;
1135 
1136 	node = rb_first(src_root);
1137 	while (node) {
1138 		struct rb_node *next;
1139 
1140 		al = rb_entry(node, struct annotation_line, rb_node);
1141 		next = rb_next(node);
1142 		rb_erase(node, src_root);
1143 
1144 		__resort_source_line(dest_root, al);
1145 		node = next;
1146 	}
1147 }
1148 
1149 static void print_summary(struct rb_root *root, const char *filename)
1150 {
1151 	struct annotation_line *al;
1152 	struct rb_node *node;
1153 
1154 	printf("\nSorted summary for file %s\n", filename);
1155 	printf("----------------------------------------------\n\n");
1156 
1157 	if (RB_EMPTY_ROOT(root)) {
1158 		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1159 		return;
1160 	}
1161 
1162 	node = rb_first(root);
1163 	while (node) {
1164 		double percent, percent_max = 0.0;
1165 		const char *color;
1166 		char *path;
1167 		int i;
1168 
1169 		al = rb_entry(node, struct annotation_line, rb_node);
1170 		for (i = 0; i < al->data_nr; i++) {
1171 			percent = al->data[i].percent_sum;
1172 			color = get_percent_color(percent);
1173 			color_fprintf(stdout, color, " %7.2f", percent);
1174 
1175 			if (percent > percent_max)
1176 				percent_max = percent;
1177 		}
1178 
1179 		path = al->path;
1180 		color = get_percent_color(percent_max);
1181 		color_fprintf(stdout, color, " %s\n", path);
1182 
1183 		node = rb_next(node);
1184 	}
1185 }
1186 
1187 static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
1188 {
1189 	struct annotation *notes = symbol__annotation(sym);
1190 	struct sym_hist *h = annotation__histogram(notes, evsel);
1191 	u64 len = symbol__size(sym), offset;
1192 
1193 	for (offset = 0; offset < len; ++offset) {
1194 		struct sym_hist_entry *entry;
1195 
1196 		entry = annotated_source__hist_entry(notes->src, evsel, offset);
1197 		if (entry && entry->nr_samples != 0)
1198 			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
1199 			       sym->start + offset, entry->nr_samples);
1200 	}
1201 	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
1202 }
1203 
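/*
 * Width needed to print the largest address in the listing, taken from the
 * last line that has a real offset.
 */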
1204 static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
1205 {
1206 	char bf[32];
1207 	struct annotation_line *line;
1208 
1209 	list_for_each_entry_reverse(line, lines, node) {
1210 		if (line->offset != -1)
1211 			return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
1212 	}
1213 
1214 	return 0;
1215 }
1216 
1217 int hist_entry__annotate_printf(struct hist_entry *he, struct evsel *evsel)
1218 {
1219 	struct map_symbol *ms = &he->ms;
1220 	struct map *map = ms->map;
1221 	struct symbol *sym = ms->sym;
1222 	struct dso *dso = map__dso(map);
1223 	char *filename;
1224 	const char *d_filename;
1225 	const char *evsel_name = evsel__name(evsel);
1226 	struct annotation *notes = symbol__annotation(sym);
1227 	struct sym_hist *h = annotation__histogram(notes, evsel);
1228 	struct annotation_line *pos, *queue = NULL;
1229 	struct annotation_options *opts = &annotate_opts;
1230 	struct annotation_print_data apd = {
1231 		.he = he,
1232 		.evsel = evsel,
1233 		.start = map__rip_2objdump(map, sym->start),
1234 	};
1235 	int printed = 2, queue_len = 0;
1236 	int more = 0;
1237 	bool context = opts->context;
1238 	int width = annotation__pcnt_width(notes);
1239 	int graph_dotted_len;
1240 	char buf[512];
1241 
1242 	filename = strdup(dso__long_name(dso));
1243 	if (!filename)
1244 		return -ENOMEM;
1245 
1246 	if (opts->full_path)
1247 		d_filename = filename;
1248 	else
1249 		d_filename = basename(filename);
1250 
1251 	if (evsel__is_group_event(evsel)) {
1252 		evsel__group_desc(evsel, buf, sizeof(buf));
1253 		evsel_name = buf;
1254 	}
1255 
1256 	graph_dotted_len = printf(" %-*.*s|	Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
1257 				  "percent: %s)\n",
1258 				  width, width, symbol_conf.show_total_period ? "Period" :
1259 				  symbol_conf.show_nr_samples ? "Samples" : "Percent",
1260 				  d_filename, evsel_name, h->nr_samples,
1261 				  percent_type_str(opts->percent_type));
1262 
1263 	printf("%-*.*s----\n",
1264 	       graph_dotted_len, graph_dotted_len, graph_dotted_line);
1265 
1266 	if (verbose > 0)
1267 		symbol__annotate_hits(sym, evsel);
1268 
1269 	apd.addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source,
1270 							      apd.start);
1271 	evsel__get_arch(evsel, &apd.arch);
1272 	apd.dbg = debuginfo__new(filename);
1273 
1274 	list_for_each_entry(pos, &notes->src->source, node) {
1275 		int err;
1276 
1277 		if (context && queue == NULL) {
1278 			queue = pos;
1279 			queue_len = 0;
1280 		}
1281 
1282 		err = annotation_line__print(pos, &apd, opts, printed, queue);
1283 
1284 		switch (err) {
1285 		case 0:
1286 			++printed;
1287 			if (context) {
1288 				printed += queue_len;
1289 				queue = NULL;
1290 				queue_len = 0;
1291 			}
1292 			break;
1293 		case 1:
1294 			/* filtered by max_lines */
1295 			++more;
1296 			break;
1297 		case -1:
1298 		default:
1299 			/*
1300 			 * Filtered by min_pcnt or non IP lines when
1301 			 * context != 0
1302 			 */
1303 			if (!context)
1304 				break;
1305 			if (queue_len == context)
1306 				queue = list_entry(queue->node.next, typeof(*queue), node);
1307 			else
1308 				++queue_len;
1309 			break;
1310 		}
1311 	}
1312 
1313 	debuginfo__delete(apd.dbg);
1314 	free(filename);
1315 
1316 	return more;
1317 }
1318 
1319 static void FILE__set_percent_color(void *fp __maybe_unused,
1320 				    double percent __maybe_unused,
1321 				    bool current __maybe_unused)
1322 {
1323 }
1324 
1325 static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
1326 					 int nr __maybe_unused, bool current __maybe_unused)
1327 {
1328 	return 0;
1329 }
1330 
1331 static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
1332 {
1333 	return 0;
1334 }
1335 
1336 static void FILE__printf(void *fp, const char *fmt, ...)
1337 {
1338 	va_list args;
1339 
1340 	va_start(args, fmt);
1341 	vfprintf(fp, fmt, args);
1342 	va_end(args);
1343 }
1344 
1345 static void FILE__write_graph(void *fp, int graph)
1346 {
1347 	const char *s;
1348 	switch (graph) {
1349 
1350 	case DARROW_CHAR: s = "↓"; break;
1351 	case UARROW_CHAR: s = "↑"; break;
1352 	case LARROW_CHAR: s = "←"; break;
1353 	case RARROW_CHAR: s = "→"; break;
1354 	default:		s = "?"; break;
1355 	}
1356 
1357 	fputs(s, fp);
1358 }
1359 
1360 static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
1361 {
1362 	struct annotation *notes = symbol__annotation(sym);
1363 	struct annotation_write_ops wops = {
1364 		.first_line		 = true,
1365 		.obj			 = fp,
1366 		.set_color		 = FILE__set_color,
1367 		.set_percent_color	 = FILE__set_percent_color,
1368 		.set_jumps_percent_color = FILE__set_jumps_percent_color,
1369 		.printf			 = FILE__printf,
1370 		.write_graph		 = FILE__write_graph,
1371 	};
1372 	struct annotation_line *al;
1373 
1374 	list_for_each_entry(al, &notes->src->source, node) {
1375 		if (annotation_line__filter(al))
1376 			continue;
1377 		annotation_line__write(al, notes, &wops);
1378 		fputc('\n', fp);
1379 		wops.first_line = false;
1380 	}
1381 
1382 	return 0;
1383 }
1384 
1385 int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
1386 {
1387 	const char *ev_name = evsel__name(evsel);
1388 	char buf[1024];
1389 	char *filename;
1390 	int err = -1;
1391 	FILE *fp;
1392 
1393 	if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
1394 		return -1;
1395 
1396 	fp = fopen(filename, "w");
1397 	if (fp == NULL)
1398 		goto out_free_filename;
1399 
1400 	if (evsel__is_group_event(evsel)) {
1401 		evsel__group_desc(evsel, buf, sizeof(buf));
1402 		ev_name = buf;
1403 	}
1404 
1405 	fprintf(fp, "%s() %s\nEvent: %s\n\n",
1406 		ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
1407 	symbol__annotate_fprintf2(ms->sym, fp);
1408 
1409 	fclose(fp);
1410 	err = 0;
1411 out_free_filename:
1412 	free(filename);
1413 	return err;
1414 }
1415 
1416 void symbol__annotate_zero_histogram(struct symbol *sym, struct evsel *evsel)
1417 {
1418 	struct annotation *notes = symbol__annotation(sym);
1419 	struct sym_hist *h = annotation__histogram(notes, evsel);
1420 
1421 	memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
1422 }
1423 
1424 void symbol__annotate_decay_histogram(struct symbol *sym, struct evsel *evsel)
1425 {
1426 	struct annotation *notes = symbol__annotation(sym);
1427 	struct sym_hist *h = annotation__histogram(notes, evsel);
1428 	struct annotation_line *al;
1429 
1430 	h->nr_samples = 0;
1431 	list_for_each_entry(al, &notes->src->source, node) {
1432 		struct sym_hist_entry *entry;
1433 
1434 		if (al->offset == -1)
1435 			continue;
1436 
1437 		entry = annotated_source__hist_entry(notes->src, evsel, al->offset);
1438 		if (entry == NULL)
1439 			continue;
1440 
1441 		entry->nr_samples = entry->nr_samples * 7 / 8;
1442 		h->nr_samples += entry->nr_samples;
1443 	}
1444 }
1445 
1446 void annotated_source__purge(struct annotated_source *as)
1447 {
1448 	struct annotation_line *al, *n;
1449 
1450 	list_for_each_entry_safe(al, n, &as->source, node) {
1451 		list_del_init(&al->node);
1452 		disasm_line__free(disasm_line(al));
1453 	}
1454 }
1455 
1456 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1457 {
1458 	size_t printed;
1459 
1460 	if (dl->al.offset == -1)
1461 		return fprintf(fp, "%s\n", dl->al.line);
1462 
1463 	printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
1464 
1465 	if (dl->ops.raw[0] != '\0') {
1466 		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1467 				   dl->ops.raw);
1468 	}
1469 
1470 	return printed + fprintf(fp, "\n");
1471 }
1472 
1473 size_t disasm__fprintf(struct list_head *head, FILE *fp)
1474 {
1475 	struct disasm_line *pos;
1476 	size_t printed = 0;
1477 
1478 	list_for_each_entry(pos, head, al.node)
1479 		printed += disasm_line__fprintf(pos, fp);
1480 
1481 	return printed;
1482 }
1483 
1484 bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
1485 {
1486 	if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
1487 	    !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
1488 	    dl->ops.target.offset >= (s64)symbol__size(sym))
1489 		return false;
1490 
1491 	return true;
1492 }
1493 
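/*
 * Count, for every line, how many local jump instructions target it
 * (->jump_sources) and remember the maximum for column sizing.
 */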
1494 static void
1495 annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
1496 {
1497 	struct annotation_line *al;
1498 
1499 	/* PLT symbols contain external offsets */
1500 	if (strstr(sym->name, "@plt"))
1501 		return;
1502 
1503 	list_for_each_entry(al, &notes->src->source, node) {
1504 		struct disasm_line *dl;
1505 		struct annotation_line *target;
1506 
1507 		dl = disasm_line(al);
1508 
1509 		if (!disasm_line__is_valid_local_jump(dl, sym))
1510 			continue;
1511 
1512 		target = annotated_source__get_line(notes->src,
1513 						    dl->ops.target.offset);
1514 		/*
1515 		 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
1516 		 * have to adjust to the previous offset?
1517 		 */
1518 		if (target == NULL)
1519 			continue;
1520 
1521 		if (++target->jump_sources > notes->src->max_jump_sources)
1522 			notes->src->max_jump_sources = target->jump_sources;
1523 	}
1524 }
1525 
1526 static void annotation__set_index(struct annotation *notes)
1527 {
1528 	struct annotation_line *al;
1529 	struct annotated_source *src = notes->src;
1530 
1531 	src->widths.max_line_len = 0;
1532 	src->nr_entries = 0;
1533 	src->nr_asm_entries = 0;
1534 
1535 	list_for_each_entry(al, &src->source, node) {
1536 		size_t line_len = strlen(al->line);
1537 
1538 		if (src->widths.max_line_len < line_len)
1539 			src->widths.max_line_len = line_len;
1540 		al->idx = src->nr_entries++;
1541 		if (al->offset != -1)
1542 			al->idx_asm = src->nr_asm_entries++;
1543 		else
1544 			al->idx_asm = -1;
1545 	}
1546 }
1547 
1548 static inline int width_jumps(int n)
1549 {
1550 	if (n >= 100)
1551 		return 5;
1552 	if (n / 10)
1553 		return 2;
1554 	return 1;
1555 }
1556 
1557 static int annotation__max_ins_name(struct annotation *notes)
1558 {
1559 	int max_name = 0, len;
1560 	struct annotation_line *al;
1561 
1562 	list_for_each_entry(al, &notes->src->source, node) {
1563 		if (al->offset == -1)
1564 			continue;
1565 
1566 		len = strlen(disasm_line(al)->ins.name);
1567 		if (max_name < len)
1568 			max_name = len;
1569 	}
1570 
1571 	return max_name;
1572 }
1573 
1574 static void
1575 annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
1576 {
1577 	notes->src->widths.addr = notes->src->widths.target =
1578 		notes->src->widths.min_addr = hex_width(symbol__size(sym));
1579 	notes->src->widths.max_addr = hex_width(sym->end);
1580 	notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources);
1581 	notes->src->widths.max_ins_name = annotation__max_ins_name(notes);
1582 }
1583 
1584 void annotation__update_column_widths(struct annotation *notes)
1585 {
1586 	if (annotate_opts.use_offset)
1587 		notes->src->widths.target = notes->src->widths.min_addr;
1588 	else if (annotate_opts.full_addr)
1589 		notes->src->widths.target = BITS_PER_LONG / 4;
1590 	else
1591 		notes->src->widths.target = notes->src->widths.max_addr;
1592 
1593 	notes->src->widths.addr = notes->src->widths.target;
1594 
1595 	if (annotate_opts.show_nr_jumps)
1596 		notes->src->widths.addr += notes->src->widths.jumps + 1;
1597 }
1598 
1599 void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
1600 {
1601 	annotate_opts.full_addr = !annotate_opts.full_addr;
1602 
1603 	if (annotate_opts.full_addr)
1604 		notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1605 	else
1606 		notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1607 
1608 	annotation__update_column_widths(notes);
1609 }
1610 
1611 static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
1612 				   struct rb_root *root)
1613 {
1614 	struct annotation_line *al;
1615 	struct rb_root tmp_root = RB_ROOT;
1616 
1617 	list_for_each_entry(al, &notes->src->source, node) {
1618 		double percent_max = 0.0;
1619 		u64 addr;
1620 		int i;
1621 
1622 		for (i = 0; i < al->data_nr; i++) {
1623 			double percent;
1624 
1625 			percent = annotation_data__percent(&al->data[i],
1626 							   annotate_opts.percent_type);
1627 
1628 			if (percent > percent_max)
1629 				percent_max = percent;
1630 		}
1631 
1632 		if (percent_max <= 0.5)
1633 			continue;
1634 
1635 		addr = map__rip_2objdump(ms->map, ms->sym->start);
1636 		al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
1637 				       false, true, ms->sym->start + al->offset);
1638 		insert_source_line(&tmp_root, al);
1639 	}
1640 
1641 	resort_source_line(root, &tmp_root);
1642 }
1643 
1644 static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
1645 {
1646 	struct annotation *notes = symbol__annotation(ms->sym);
1647 
1648 	annotation__calc_lines(notes, ms, root);
1649 }
1650 
1651 int hist_entry__tty_annotate2(struct hist_entry *he, struct evsel *evsel)
1652 {
1653 	struct map_symbol *ms = &he->ms;
1654 	struct dso *dso = map__dso(ms->map);
1655 	struct symbol *sym = ms->sym;
1656 	struct rb_root source_line = RB_ROOT;
1657 	struct hists *hists = evsel__hists(evsel);
1658 	char buf[1024];
1659 	int err;
1660 
1661 	err = symbol__annotate2(ms, evsel, NULL);
1662 	if (err) {
1663 		char msg[BUFSIZ];
1664 
1665 		dso__set_annotate_warned(dso);
1666 		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1667 		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1668 		return -1;
1669 	}
1670 
1671 	if (annotate_opts.print_lines) {
1672 		srcline_full_filename = annotate_opts.full_path;
1673 		symbol__calc_lines(ms, &source_line);
1674 		print_summary(&source_line, dso__long_name(dso));
1675 	}
1676 
1677 	hists__scnprintf_title(hists, buf, sizeof(buf));
1678 	fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
1679 		buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso));
1680 	symbol__annotate_fprintf2(sym, stdout);
1681 
1682 	annotated_source__purge(symbol__annotation(sym)->src);
1683 
1684 	return 0;
1685 }
1686 
1687 int hist_entry__tty_annotate(struct hist_entry *he, struct evsel *evsel)
1688 {
1689 	struct map_symbol *ms = &he->ms;
1690 	struct dso *dso = map__dso(ms->map);
1691 	struct symbol *sym = ms->sym;
1692 	struct rb_root source_line = RB_ROOT;
1693 	int err;
1694 
1695 	err = symbol__annotate(ms, evsel, NULL);
1696 	if (err) {
1697 		char msg[BUFSIZ];
1698 
1699 		dso__set_annotate_warned(dso);
1700 		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1701 		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1702 		return -1;
1703 	}
1704 
1705 	symbol__calc_percent(sym, evsel);
1706 
1707 	if (annotate_opts.print_lines) {
1708 		srcline_full_filename = annotate_opts.full_path;
1709 		symbol__calc_lines(ms, &source_line);
1710 		print_summary(&source_line, dso__long_name(dso));
1711 	}
1712 
1713 	hist_entry__annotate_printf(he, evsel);
1714 
1715 	annotated_source__purge(symbol__annotation(sym)->src);
1716 
1717 	return 0;
1718 }
1719 
1720 bool ui__has_annotation(void)
1721 {
1722 	return use_browser == 1 && perf_hpp_list.sym;
1723 }
1724 
1725 
1726 static double annotation_line__max_percent(struct annotation_line *al,
1727 					   unsigned int percent_type)
1728 {
1729 	double percent_max = 0.0;
1730 	int i;
1731 
1732 	for (i = 0; i < al->data_nr; i++) {
1733 		double percent;
1734 
1735 		percent = annotation_data__percent(&al->data[i],
1736 						   percent_type);
1737 
1738 		if (percent > percent_max)
1739 			percent_max = percent;
1740 	}
1741 
1742 	return percent_max;
1743 }
1744 
1745 static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
1746 			       void *obj, char *bf, size_t size,
1747 			       void (*obj__printf)(void *obj, const char *fmt, ...),
1748 			       void (*obj__write_graph)(void *obj, int graph))
1749 {
1750 	if (dl->ins.ops && dl->ins.ops->scnprintf) {
1751 		if (ins__is_jump(&dl->ins)) {
1752 			bool fwd;
1753 
1754 			if (dl->ops.target.outside)
1755 				goto call_like;
1756 			fwd = dl->ops.target.offset > dl->al.offset;
1757 			obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
1758 			obj__printf(obj, " ");
1759 		} else if (ins__is_call(&dl->ins)) {
1760 call_like:
1761 			obj__write_graph(obj, RARROW_CHAR);
1762 			obj__printf(obj, " ");
1763 		} else if (ins__is_ret(&dl->ins)) {
1764 			obj__write_graph(obj, LARROW_CHAR);
1765 			obj__printf(obj, " ");
1766 		} else {
1767 			obj__printf(obj, "  ");
1768 		}
1769 	} else {
1770 		obj__printf(obj, "  ");
1771 	}
1772 
1773 	disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
1774 			       notes->src->widths.max_ins_name);
1775 }
1776 
1777 static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
1778 {
1779 	double ipc = 0.0, coverage = 0.0;
1780 	struct annotated_branch *branch = annotation__get_branch(notes);
1781 
1782 	if (branch && branch->hit_cycles)
1783 		ipc = branch->hit_insn / ((double)branch->hit_cycles);
1784 
1785 	if (branch && branch->total_insn) {
1786 		coverage = branch->cover_insn * 100.0 /
1787 			((double)branch->total_insn);
1788 	}
1789 
1790 	scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
1791 		  ipc, coverage);
1792 }
1793 
1794 int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header)
1795 {
1796 	struct evsel *pos;
1797 	struct strbuf sb;
1798 
1799 	if (evsel->evlist->nr_br_cntr <= 0)
1800 		return -ENOTSUP;
1801 
1802 	strbuf_init(&sb, /*hint=*/ 0);
1803 
1804 	if (header && strbuf_addf(&sb, "# Branch counter abbr list:\n"))
1805 		goto err;
1806 
1807 	evlist__for_each_entry(evsel->evlist, pos) {
1808 		if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
1809 			continue;
1810 		if (header && strbuf_addf(&sb, "#"))
1811 			goto err;
1812 
1813 		if (strbuf_addf(&sb, " %s = %s\n", pos->name, pos->abbr_name))
1814 			goto err;
1815 	}
1816 
1817 	if (header && strbuf_addf(&sb, "#"))
1818 		goto err;
1819 	if (strbuf_addf(&sb, " '-' No event occurs\n"))
1820 		goto err;
1821 
1822 	if (header && strbuf_addf(&sb, "#"))
1823 		goto err;
1824 	if (strbuf_addf(&sb, " '+' Event occurrences may be lost due to branch counter saturated\n"))
1825 		goto err;
1826 
1827 	*str = strbuf_detach(&sb, NULL);
1828 
1829 	return 0;
1830 err:
1831 	strbuf_release(&sb);
1832 	return -ENOMEM;
1833 }
1834 
1835 /* Assume the branch counter saturates at 3 */
1836 #define ANNOTATION_BR_CNTR_SATURATION		3
1837 
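/*
 * Build the "Branch Counter" cell for one annotation line.  With -v each
 * counter is printed as "<abbr>=<avg>"; otherwise a small histogram of abbr
 * characters is drawn, using '-' for zero and '+' for saturation.
 */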
1838 int annotation_br_cntr_entry(char **str, int br_cntr_nr,
1839 			     u64 *br_cntr, int num_aggr,
1840 			     struct evsel *evsel)
1841 {
1842 	struct evsel *pos = evsel ? evlist__first(evsel->evlist) : NULL;
1843 	bool saturated = false;
1844 	int i, j, avg, used;
1845 	struct strbuf sb;
1846 
1847 	strbuf_init(&sb, /*hint=*/ 0);
1848 	for (i = 0; i < br_cntr_nr; i++) {
1849 		used = 0;
1850 		avg = ceil((double)(br_cntr[i] & ~ANNOTATION__BR_CNTR_SATURATED_FLAG) /
1851 			   (double)num_aggr);
1852 
1853 		/*
1854 		 * A histogram with the abbr name is displayed by default.
1855 		 * With -v, the exact branch counter values are displayed.
1856 		 */
1857 		if (verbose) {
1858 			evlist__for_each_entry_from(evsel->evlist, pos) {
1859 				if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
1860 				    (pos->br_cntr_idx == i))
1861 					break;
1862 			}
1863 			if (strbuf_addstr(&sb, pos->abbr_name))
1864 				goto err;
1865 
1866 			if (!br_cntr[i]) {
1867 				if (strbuf_addstr(&sb, "=-"))
1868 					goto err;
1869 			} else {
1870 				if (strbuf_addf(&sb, "=%d", avg))
1871 					goto err;
1872 			}
1873 			if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG) {
1874 				if (strbuf_addch(&sb, '+'))
1875 					goto err;
1876 			} else {
1877 				if (strbuf_addch(&sb, ' '))
1878 					goto err;
1879 			}
1880 
1881 			if ((i < br_cntr_nr - 1) && strbuf_addch(&sb, ','))
1882 				goto err;
1883 			continue;
1884 		}
1885 
1886 		if (strbuf_addch(&sb, '|'))
1887 			goto err;
1888 
1889 		if (!br_cntr[i]) {
1890 			if (strbuf_addch(&sb, '-'))
1891 				goto err;
1892 			used++;
1893 		} else {
1894 			evlist__for_each_entry_from(evsel->evlist, pos) {
1895 				if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
1896 				    (pos->br_cntr_idx == i))
1897 					break;
1898 			}
1899 			if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG)
1900 				saturated = true;
1901 
1902 			for (j = 0; j < avg; j++, used++) {
1903 				/* Print + if the number of logged events > 3 */
1904 				if (j >= ANNOTATION_BR_CNTR_SATURATION) {
1905 					saturated = true;
1906 					break;
1907 				}
1908 				if (strbuf_addstr(&sb, pos->abbr_name))
1909 					goto err;
1910 			}
1911 
1912 			if (saturated) {
1913 				if (strbuf_addch(&sb, '+'))
1914 					goto err;
1915 				used++;
1916 			}
1917 			pos = list_next_entry(pos, core.node);
1918 		}
1919 
1920 		for (j = used; j < ANNOTATION_BR_CNTR_SATURATION + 1; j++) {
1921 			if (strbuf_addch(&sb, ' '))
1922 				goto err;
1923 		}
1924 	}
1925 
1926 	if (!verbose && strbuf_addch(&sb, br_cntr_nr ? '|' : ' '))
1927 		goto err;
1928 
1929 	*str = strbuf_detach(&sb, NULL);
1930 
1931 	return 0;
1932 err:
1933 	strbuf_release(&sb);
1934 	return -ENOMEM;
1935 }
1936 
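/*
 * Render one annotation line through the obj callbacks: the per-event
 * percent (or period/sample) columns, the optional IPC/cycle/branch counter
 * columns and finally the address and source or disassembly text.
 */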
1937 static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
1938 				     bool first_line, bool current_entry, bool change_color, int width,
1939 				     void *obj, unsigned int percent_type,
1940 				     int  (*obj__set_color)(void *obj, int color),
1941 				     void (*obj__set_percent_color)(void *obj, double percent, bool current),
1942 				     int  (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
1943 				     void (*obj__printf)(void *obj, const char *fmt, ...),
1944 				     void (*obj__write_graph)(void *obj, int graph))
1945 
1946 {
1947 	double percent_max = annotation_line__max_percent(al, percent_type);
1948 	int pcnt_width = annotation__pcnt_width(notes),
1949 	    cycles_width = annotation__cycles_width(notes);
1950 	bool show_title = false;
1951 	char bf[256];
1952 	int printed;
1953 
1954 	if (first_line && (al->offset == -1 || percent_max == 0.0)) {
1955 		if (notes->branch && al->cycles) {
1956 			if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
1957 				show_title = true;
1958 		} else
1959 			show_title = true;
1960 	}
1961 
1962 	if (al->offset != -1 && percent_max != 0.0) {
1963 		int i;
1964 
1965 		for (i = 0; i < al->data_nr; i++) {
1966 			double percent;
1967 
1968 			percent = annotation_data__percent(&al->data[i], percent_type);
1969 
1970 			obj__set_percent_color(obj, percent, current_entry);
1971 			if (symbol_conf.show_total_period) {
1972 				obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
1973 			} else if (symbol_conf.show_nr_samples) {
1974 				obj__printf(obj, "%7" PRIu64 " ",
1975 						   al->data[i].he.nr_samples);
1976 			} else {
1977 				obj__printf(obj, "%7.2f ", percent);
1978 			}
1979 		}
1980 	} else {
1981 		obj__set_percent_color(obj, 0, current_entry);
1982 
1983 		if (!show_title)
1984 			obj__printf(obj, "%-*s", pcnt_width, " ");
1985 		else {
1986 			obj__printf(obj, "%-*s", pcnt_width,
1987 					   symbol_conf.show_total_period ? "Period" :
1988 					   symbol_conf.show_nr_samples ? "Samples" : "Percent");
1989 		}
1990 	}
1991 
1992 	if (notes->branch) {
1993 		if (al->cycles && al->cycles->ipc)
1994 			obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
1995 		else if (!show_title)
1996 			obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
1997 		else
1998 			obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
1999 
2000 		if (!annotate_opts.show_minmax_cycle) {
2001 			if (al->cycles && al->cycles->avg)
2002 				obj__printf(obj, "%*" PRIu64 " ",
2003 					   ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
2004 			else if (!show_title)
2005 				obj__printf(obj, "%*s",
2006 					    ANNOTATION__CYCLES_WIDTH, " ");
2007 			else
2008 				obj__printf(obj, "%*s ",
2009 					    ANNOTATION__CYCLES_WIDTH - 1,
2010 					    "Cycle");
2011 		} else {
2012 			if (al->cycles) {
2013 				char str[32];
2014 
2015 				scnprintf(str, sizeof(str),
2016 					"%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
2017 					al->cycles->avg, al->cycles->min,
2018 					al->cycles->max);
2019 
2020 				obj__printf(obj, "%*s ",
2021 					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
2022 					    str);
2023 			} else if (!show_title)
2024 				obj__printf(obj, "%*s",
2025 					    ANNOTATION__MINMAX_CYCLES_WIDTH,
2026 					    " ");
2027 			else
2028 				obj__printf(obj, "%*s ",
2029 					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
2030 					    "Cycle(min/max)");
2031 		}
2032 
2033 		if (annotate_opts.show_br_cntr) {
2034 			if (show_title) {
2035 				obj__printf(obj, "%*s ",
2036 					    ANNOTATION__BR_CNTR_WIDTH,
2037 					    "Branch Counter");
2038 			} else {
2039 				char *buf;
2040 
2041 				if (!annotation_br_cntr_entry(&buf, al->br_cntr_nr, al->br_cntr,
2042 							      al->num_aggr, al->evsel)) {
2043 					obj__printf(obj, "%*s ", ANNOTATION__BR_CNTR_WIDTH, buf);
2044 					free(buf);
2045 				}
2046 			}
2047 		}
2048 
2049 		if (show_title && !*al->line) {
2050 			ipc_coverage_string(bf, sizeof(bf), notes);
2051 			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
2052 		}
2053 	}
2054 
2055 	obj__printf(obj, " ");
2056 
2057 	if (!*al->line)
2058 		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
2059 	else if (al->offset == -1) {
2060 		if (al->line_nr && annotate_opts.show_linenr)
2061 			printed = scnprintf(bf, sizeof(bf), "%-*d ",
2062 					    notes->src->widths.addr + 1, al->line_nr);
2063 		else
2064 			printed = scnprintf(bf, sizeof(bf), "%-*s  ",
2065 					    notes->src->widths.addr, " ");
2066 		obj__printf(obj, bf);
2067 		obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
2068 	} else {
2069 		u64 addr = al->offset;
2070 		int color = -1;
2071 
2072 		if (!annotate_opts.use_offset)
2073 			addr += notes->src->start;
2074 
2075 		if (!annotate_opts.use_offset) {
2076 			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
2077 		} else {
2078 			if (al->jump_sources &&
2079 			    annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
2080 				if (annotate_opts.show_nr_jumps) {
2081 					int prev;
2082 					printed = scnprintf(bf, sizeof(bf), "%*d ",
2083 							    notes->src->widths.jumps,
2084 							    al->jump_sources);
2085 					prev = obj__set_jumps_percent_color(obj, al->jump_sources,
2086 									    current_entry);
2087 					obj__printf(obj, bf);
2088 					obj__set_color(obj, prev);
2089 				}
2090 print_addr:
2091 				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
2092 						    notes->src->widths.target, addr);
2093 			} else if (ins__is_call(&disasm_line(al)->ins) &&
2094 				   annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
2095 				goto print_addr;
2096 			} else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
2097 				goto print_addr;
2098 			} else {
2099 				printed = scnprintf(bf, sizeof(bf), "%-*s  ",
2100 						    notes->src->widths.addr, " ");
2101 			}
2102 		}
2103 
2104 		if (change_color)
2105 			color = obj__set_color(obj, HE_COLORSET_ADDR);
2106 		obj__printf(obj, bf);
2107 		if (change_color)
2108 			obj__set_color(obj, color);
2109 
2110 		disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);
2111 
2112 		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
2113 	}
2114 
2115 }
2116 
2117 void annotation_line__write(struct annotation_line *al, struct annotation *notes,
2118 			    struct annotation_write_ops *wops)
2119 {
2120 	__annotation_line__write(al, notes, wops->first_line, wops->current_entry,
2121 				 wops->change_color, wops->width, wops->obj,
2122 				 annotate_opts.percent_type,
2123 				 wops->set_color, wops->set_percent_color,
2124 				 wops->set_jumps_percent_color, wops->printf,
2125 				 wops->write_graph);
2126 }
2127 
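/*
 * Like symbol__annotate(), but also computes per-line percentages, jump
 * targets, IPC/cycle accounting and the column widths needed for
 * interactive output.
 */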
2128 int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
2129 		      struct arch **parch)
2130 {
2131 	struct symbol *sym = ms->sym;
2132 	struct annotation *notes = symbol__annotation(sym);
2133 	size_t size = symbol__size(sym);
2134 	int err;
2135 
2136 	err = symbol__annotate(ms, evsel, parch);
2137 	if (err)
2138 		return err;
2139 
2140 	symbol__calc_percent(sym, evsel);
2141 
2142 	annotation__set_index(notes);
2143 	annotation__mark_jump_targets(notes, sym);
2144 
2145 	err = annotation__compute_ipc(notes, size, evsel);
2146 	if (err)
2147 		return err;
2148 
2149 	annotation__init_column_widths(notes, sym);
2150 	annotation__update_column_widths(notes);
2151 	sym->annotate2 = 1;
2152 
2153 	return 0;
2154 }
2155 
2156 const char * const perf_disassembler__strs[] = {
2157 	[PERF_DISASM_UNKNOWN]  = "unknown",
2158 	[PERF_DISASM_LLVM]     = "llvm",
2159 	[PERF_DISASM_CAPSTONE] = "capstone",
2160 	[PERF_DISASM_OBJDUMP]  = "objdump",
2161 };
2162 
2163 
2164 static void annotation_options__add_disassembler(struct annotation_options *options,
2165 						 enum perf_disassembler dis)
2166 {
2167 	for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers); i++) {
2168 		if (options->disassemblers[i] == dis) {
2169 			/* Disassembler is already present, so don't add it again. */
2170 			return;
2171 		}
2172 		if (options->disassemblers[i] == PERF_DISASM_UNKNOWN) {
2173 			/* Found a free slot. */
2174 			options->disassemblers[i] = dis;
2175 			return;
2176 		}
2177 	}
2178 	pr_err("Failed to add disassembler %d\n", dis);
2179 }
2180 
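/*
 * Parse a comma-separated list of disassembler names (e.g.
 * "llvm,capstone,objdump") and add each of them to @options in order.
 * Returns -1 if a name is not recognized.
 */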
2181 static int annotation_options__add_disassemblers_str(struct annotation_options *options,
2182 						const char *str)
2183 {
2184 	while (str && *str != '\0') {
2185 		const char *comma = strchr(str, ',');
2186 		int len = comma ? comma - str : (int)strlen(str);
2187 		bool match = false;
2188 
2189 		for (u8 i = 0; i < ARRAY_SIZE(perf_disassembler__strs); i++) {
2190 			const char *dis_str = perf_disassembler__strs[i];
2191 
2192 			if (len == (int)strlen(dis_str) && !strncmp(str, dis_str, len)) {
2193 				annotation_options__add_disassembler(options, i);
2194 				match = true;
2195 				break;
2196 			}
2197 		}
2198 		if (!match) {
2199 			pr_err("Invalid disassembler '%.*s'\n", len, str);
2200 			return -1;
2201 		}
2202 		str = comma ? comma + 1 : NULL;
2203 	}
2204 	return 0;
2205 }
2206 
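/* perf_config() callback that applies the "annotate.*" config variables. */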
2207 static int annotation__config(const char *var, const char *value, void *data)
2208 {
2209 	struct annotation_options *opt = data;
2210 
2211 	if (!strstarts(var, "annotate."))
2212 		return 0;
2213 
2214 	if (!strcmp(var, "annotate.offset_level")) {
2215 		perf_config_u8(&opt->offset_level, "offset_level", value);
2216 
2217 		if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
2218 			opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
2219 		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
2220 			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
2221 	} else if (!strcmp(var, "annotate.disassemblers")) {
2222 		int err = annotation_options__add_disassemblers_str(opt, value);
2223 
2224 		if (err)
2225 			return err;
2226 	} else if (!strcmp(var, "annotate.hide_src_code")) {
2227 		opt->hide_src_code = perf_config_bool("hide_src_code", value);
2228 	} else if (!strcmp(var, "annotate.jump_arrows")) {
2229 		opt->jump_arrows = perf_config_bool("jump_arrows", value);
2230 	} else if (!strcmp(var, "annotate.show_linenr")) {
2231 		opt->show_linenr = perf_config_bool("show_linenr", value);
2232 	} else if (!strcmp(var, "annotate.show_nr_jumps")) {
2233 		opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
2234 	} else if (!strcmp(var, "annotate.show_nr_samples")) {
2235 		symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
2236 								value);
2237 	} else if (!strcmp(var, "annotate.show_total_period")) {
2238 		symbol_conf.show_total_period = perf_config_bool("show_total_period",
2239 								value);
2240 	} else if (!strcmp(var, "annotate.use_offset")) {
2241 		opt->use_offset = perf_config_bool("use_offset", value);
2242 	} else if (!strcmp(var, "annotate.disassembler_style")) {
2243 		opt->disassembler_style = strdup(value);
2244 		if (!opt->disassembler_style) {
2245 			pr_err("Not enough memory for annotate.disassembler_style\n");
2246 			return -1;
2247 		}
2248 	} else if (!strcmp(var, "annotate.objdump")) {
2249 		opt->objdump_path = strdup(value);
2250 		if (!opt->objdump_path) {
2251 			pr_err("Not enough memory for annotate.objdump\n");
2252 			return -1;
2253 		}
2254 	} else if (!strcmp(var, "annotate.addr2line")) {
2255 		symbol_conf.addr2line_path = strdup(value);
2256 		if (!symbol_conf.addr2line_path) {
2257 			pr_err("Not enough memory for annotate.addr2line\n");
2258 			return -1;
2259 		}
2260 	} else if (!strcmp(var, "annotate.demangle")) {
2261 		symbol_conf.demangle = perf_config_bool("demangle", value);
2262 	} else if (!strcmp(var, "annotate.demangle_kernel")) {
2263 		symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
2264 	} else {
2265 		pr_debug("%s variable unknown, ignoring...\n", var);
2266 	}
2267 
2268 	return 0;
2269 }
2270 
2271 void annotation_options__init(void)
2272 {
2273 	struct annotation_options *opt = &annotate_opts;
2274 
2275 	memset(opt, 0, sizeof(*opt));
2276 
2277 	/* Default values. */
2278 	opt->use_offset = true;
2279 	opt->jump_arrows = true;
2280 	opt->annotate_src = true;
2281 	opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
2282 	opt->percent_type = PERCENT_PERIOD_LOCAL;
2283 	opt->hide_src_code_on_title = true;
2284 }
2285 
2286 void annotation_options__exit(void)
2287 {
2288 	zfree(&annotate_opts.disassembler_style);
2289 	zfree(&annotate_opts.objdump_path);
2290 }
2291 
2292 static void annotation_options__default_init_disassemblers(struct annotation_options *options)
2293 {
2294 	if (options->disassemblers[0] != PERF_DISASM_UNKNOWN) {
2295 		/* Already initialized. */
2296 		return;
2297 	}
2298 #ifdef HAVE_LIBLLVM_SUPPORT
2299 	annotation_options__add_disassembler(options, PERF_DISASM_LLVM);
2300 #endif
2301 #ifdef HAVE_LIBCAPSTONE_SUPPORT
2302 	annotation_options__add_disassembler(options, PERF_DISASM_CAPSTONE);
2303 #endif
2304 	annotation_options__add_disassembler(options, PERF_DISASM_OBJDUMP);
2305 }
2306 
2307 void annotation_config__init(void)
2308 {
2309 	perf_config(annotation__config, &annotate_opts);
2310 	annotation_options__default_init_disassemblers(&annotate_opts);
2311 }
2312 
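/*
 * Map a ("period"|"hits", "local"|"global") pair to the corresponding
 * PERCENT_* constant, or return (unsigned int)-1 if it doesn't match.
 */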
2313 static unsigned int parse_percent_type(char *str1, char *str2)
2314 {
2315 	unsigned int type = (unsigned int) -1;
2316 
2317 	if (!strcmp("period", str1)) {
2318 		if (!strcmp("local", str2))
2319 			type = PERCENT_PERIOD_LOCAL;
2320 		else if (!strcmp("global", str2))
2321 			type = PERCENT_PERIOD_GLOBAL;
2322 	}
2323 
2324 	if (!strcmp("hits", str1)) {
2325 		if (!strcmp("local", str2))
2326 			type = PERCENT_HITS_LOCAL;
2327 		else if (!strcmp("global", str2))
2328 			type = PERCENT_HITS_GLOBAL;
2329 	}
2330 
2331 	return type;
2332 }
2333 
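/*
 * Option callback for --percent-type.  The two parts of the argument may
 * come in either order, e.g. "local-period" or "period-local".
 */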
2334 int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
2335 				int unset __maybe_unused)
2336 {
2337 	unsigned int type;
2338 	char *str1, *str2;
2339 	int err = -1;
2340 
2341 	str1 = strdup(_str);
2342 	if (!str1)
2343 		return -ENOMEM;
2344 
2345 	str2 = strchr(str1, '-');
2346 	if (!str2)
2347 		goto out;
2348 
2349 	*str2++ = 0;
2350 
2351 	type = parse_percent_type(str1, str2);
2352 	if (type == (unsigned int) -1)
2353 		type = parse_percent_type(str2, str1);
2354 	if (type != (unsigned int) -1) {
2355 		annotate_opts.percent_type = type;
2356 		err = 0;
2357 	}
2358 
2359 out:
2360 	free(str1);
2361 	return err;
2362 }
2363 
2364 int annotate_check_args(void)
2365 {
2366 	struct annotation_options *args = &annotate_opts;
2367 
2368 	if (args->prefix_strip && !args->prefix) {
2369 		pr_err("--prefix-strip requires --prefix\n");
2370 		return -1;
2371 	}
2372 	return 0;
2373 }
2374 
2375 /*
2376  * Get the register number and access offset from the given instruction.
2377  * It assumes AT&T x86 asm format like OFFSET(REG).  The format may need
2378  * to be revisited when other architectures are handled.
2379  * Fills the register and offset in @op_loc and returns 0 on success.
2380  */
2381 static int extract_reg_offset(struct arch *arch, const char *str,
2382 			      struct annotated_op_loc *op_loc)
2383 {
2384 	char *p;
2385 	char *regname;
2386 
2387 	if (arch->objdump.register_char == 0)
2388 		return -1;
2389 
2390 	/*
2391 	 * It should start with the offset, but a 0 offset can be omitted
2392 	 * in the asm, so 0(%rax) is the same as (%rax).
2393 	 *
2394 	 * However, it can also start with a segment selector register like
2395 	 * %gs:0x18(%rbx).  In that case that part should be skipped.
2396 	 */
2397 	if (*str == arch->objdump.register_char) {
2398 		if (arch__is(arch, "x86")) {
2399 			/* FIXME: Handle other segment registers */
2400 			if (!strncmp(str, "%gs:", 4))
2401 				op_loc->segment = INSN_SEG_X86_GS;
2402 		}
2403 
2404 		while (*str && !isdigit(*str) &&
2405 		       *str != arch->objdump.memory_ref_char)
2406 			str++;
2407 	}
2408 
2409 	op_loc->offset = strtol(str, &p, 0);
2410 
2411 	p = strchr(p, arch->objdump.register_char);
2412 	if (p == NULL)
2413 		return -1;
2414 
2415 	regname = strdup(p);
2416 	if (regname == NULL)
2417 		return -1;
2418 
2419 	op_loc->reg1 = get_dwarf_regnum(regname, arch->e_machine, arch->e_flags);
2420 	free(regname);
2421 
2422 	/* Get the second register */
2423 	if (op_loc->multi_regs) {
2424 		p = strchr(p + 1, arch->objdump.register_char);
2425 		if (p == NULL)
2426 			return -1;
2427 
2428 		regname = strdup(p);
2429 		if (regname == NULL)
2430 			return -1;
2431 
2432 		op_loc->reg2 = get_dwarf_regnum(regname, arch->e_machine, arch->e_flags);
2433 		free(regname);
2434 	}
2435 	return 0;
2436 }
2437 
2438 /**
2439  * annotate_get_insn_location - Get location of instruction
2440  * @arch: the architecture info
2441  * @dl: the target instruction
2442  * @loc: a buffer to save the data
2443  *
2444  * Get detailed location info (register and offset) in the instruction.
2445  * It fills in both the source and target operands and whether each
2446  * accesses a memory location.  The offset field is meaningful only when
2447  * the corresponding mem_ref flag is set.  The reg2 field is meaningful
2448  * only when the multi_regs flag is set.
2449  *
2450  * Some examples on x86:
2451  *
2452  *   mov  (%rax), %rcx   # src_reg1 = rax, src_mem = 1, src_offset = 0
2453  *                       # dst_reg1 = rcx, dst_mem = 0
2454  *
2455  *   mov  0x18, %r8      # src_reg1 = -1, src_mem = 0
2456  *                       # dst_reg1 = r8, dst_mem = 0
2457  *
2458  *   mov  %rsi, 8(%rbx,%rcx,4)  # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
2459  *                              # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
2460  *                              # dst_multi_regs = 1, dst_offset = 8
2461  */
2462 int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
2463 			       struct annotated_insn_loc *loc)
2464 {
2465 	struct ins_operands *ops;
2466 	struct annotated_op_loc *op_loc;
2467 	int i;
2468 
2469 	if (ins__is_lock(&dl->ins))
2470 		ops = dl->ops.locked.ops;
2471 	else
2472 		ops = &dl->ops;
2473 
2474 	if (ops == NULL)
2475 		return -1;
2476 
2477 	memset(loc, 0, sizeof(*loc));
2478 
2479 	for_each_insn_op_loc(loc, i, op_loc) {
2480 		const char *insn_str = ops->source.raw;
2481 		bool multi_regs = ops->source.multi_regs;
2482 		bool mem_ref = ops->source.mem_ref;
2483 
2484 		if (i == INSN_OP_TARGET) {
2485 			insn_str = ops->target.raw;
2486 			multi_regs = ops->target.multi_regs;
2487 			mem_ref = ops->target.mem_ref;
2488 		}
2489 
2490 		/* Invalidate the registers by default */
2491 		op_loc->reg1 = -1;
2492 		op_loc->reg2 = -1;
2493 
2494 		if (insn_str == NULL) {
2495 			if (!arch__is(arch, "powerpc"))
2496 				continue;
2497 		}
2498 
2499 		/*
2500 		 * For powerpc, call get_powerpc_regs(), which extracts the
2501 		 * required fields for op_loc (i.e. reg1, reg2 and offset) from
2502 		 * the raw instruction.
2503 		 */
2504 		if (arch__is(arch, "powerpc")) {
2505 			op_loc->mem_ref = mem_ref;
2506 			op_loc->multi_regs = multi_regs;
2507 			get_powerpc_regs(dl->raw.raw_insn, !i, op_loc);
2508 		} else if (strchr(insn_str, arch->objdump.memory_ref_char)) {
2509 			op_loc->mem_ref = true;
2510 			op_loc->multi_regs = multi_regs;
2511 			extract_reg_offset(arch, insn_str, op_loc);
2512 		} else {
2513 			char *s, *p = NULL;
2514 
2515 			if (arch__is(arch, "x86")) {
2516 				/* FIXME: Handle other segment registers */
2517 				if (!strncmp(insn_str, "%gs:", 4)) {
2518 					op_loc->segment = INSN_SEG_X86_GS;
2519 					op_loc->offset = strtol(insn_str + 4,
2520 								&p, 0);
2521 					if (p && p != insn_str + 4)
2522 						op_loc->imm = true;
2523 					continue;
2524 				}
2525 			}
2526 
2527 			s = strdup(insn_str);
2528 			if (s == NULL)
2529 				return -1;
2530 
2531 			if (*s == arch->objdump.register_char)
2532 				op_loc->reg1 = get_dwarf_regnum(s, arch->e_machine, arch->e_flags);
2533 			else if (*s == arch->objdump.imm_char) {
2534 				op_loc->offset = strtol(s + 1, &p, 0);
2535 				if (p && p != s + 1)
2536 					op_loc->imm = true;
2537 			}
2538 			free(s);
2539 		}
2540 	}
2541 
2542 	return 0;
2543 }
2544 
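/*
 * Find the disasm_line for the instruction at @ip within @sym.  When
 * @allow_update is set, a "lock" prefix emitted on its own line (as
 * llvm-objdump does) is skipped in favor of the following instruction.
 */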
2545 static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
2546 					    bool allow_update)
2547 {
2548 	struct disasm_line *dl;
2549 	struct annotation *notes;
2550 
2551 	notes = symbol__annotation(sym);
2552 
2553 	list_for_each_entry(dl, &notes->src->source, al.node) {
2554 		if (dl->al.offset == -1)
2555 			continue;
2556 
2557 		if (sym->start + dl->al.offset == ip) {
2558 			/*
2559 			 * llvm-objdump places "lock" on a separate line and
2560 			 * in that case we want the next line.
2561 			 */
2562 			if (ins__is_lock(&dl->ins) &&
2563 			    *dl->ops.raw == '\0' && allow_update) {
2564 				ip++;
2565 				continue;
2566 			}
2567 			return dl;
2568 		}
2569 	}
2570 	return NULL;
2571 }
2572 
2573 static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
2574 						      const char *name)
2575 {
2576 	struct annotated_item_stat *istat;
2577 
2578 	list_for_each_entry(istat, head, list) {
2579 		if (!strcmp(istat->name, name))
2580 			return istat;
2581 	}
2582 
2583 	istat = zalloc(sizeof(*istat));
2584 	if (istat == NULL)
2585 		return NULL;
2586 
2587 	istat->name = strdup(name);
2588 	if ((istat->name == NULL) || (!strlen(istat->name))) {
2589 		free(istat);
2590 		return NULL;
2591 	}
2592 
2593 	list_add_tail(&istat->list, head);
2594 	return istat;
2595 }
2596 
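/*
 * Check whether the instruction implicitly accesses the stack: on x86,
 * push/pop/call/ret all read or write memory at the stack pointer.
 */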
2597 static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
2598 {
2599 	if (arch__is(arch, "x86")) {
2600 		if (!strncmp(dl->ins.name, "push", 4) ||
2601 		    !strncmp(dl->ins.name, "pop", 3) ||
2602 		    !strncmp(dl->ins.name, "call", 4) ||
2603 		    !strncmp(dl->ins.name, "ret", 3))
2604 			return true;
2605 	}
2606 
2607 	return false;
2608 }
2609 
2610 static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
2611 {
2612 	/* On x86_64, %gs:40 is used for the stack canary */
2613 	if (arch__is(arch, "x86")) {
2614 		if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
2615 		    loc->offset == 40)
2616 			return true;
2617 	}
2618 
2619 	return false;
2620 }
2621 
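/*
 * Return the previous line that carries an actual instruction (i.e.
 * offset != -1), or NULL if @curr is the first such line.
 */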
2622 static struct disasm_line *
2623 annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
2624 {
2625 	struct list_head *sources = &notes->src->source;
2626 	struct disasm_line *prev;
2627 
2628 	if (curr == list_first_entry(sources, struct disasm_line, al.node))
2629 		return NULL;
2630 
2631 	prev = list_prev_entry(curr, al.node);
2632 	while (prev->al.offset == -1 &&
2633 	       prev != list_first_entry(sources, struct disasm_line, al.node))
2634 		prev = list_prev_entry(prev, al.node);
2635 
2636 	if (prev->al.offset == -1)
2637 		return NULL;
2638 
2639 	return prev;
2640 }
2641 
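/*
 * Return the next line that carries an actual instruction (i.e.
 * offset != -1), or NULL if @curr is the last such line.
 */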
2642 static struct disasm_line *
2643 annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
2644 {
2645 	struct list_head *sources = &notes->src->source;
2646 	struct disasm_line *next;
2647 
2648 	if (curr == list_last_entry(sources, struct disasm_line, al.node))
2649 		return NULL;
2650 
2651 	next = list_next_entry(curr, al.node);
2652 	while (next->al.offset == -1 &&
2653 	       next != list_last_entry(sources, struct disasm_line, al.node))
2654 		next = list_next_entry(next, al.node);
2655 
2656 	if (next->al.offset == -1)
2657 		return NULL;
2658 
2659 	return next;
2660 }
2661 
2662 u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
2663 			struct disasm_line *dl)
2664 {
2665 	struct annotation *notes;
2666 	struct disasm_line *next;
2667 	u64 addr;
2668 
2669 	notes = symbol__annotation(ms->sym);
2670 	/*
2671 	 * PC-relative addressing starts from the next instruction address,
2672 	 * but the IP is for the current instruction.  Since disasm_line
2673 	 * doesn't have the instruction size, calculate it using the next
2674 	 * disasm_line.  If it's the last one, we can use the symbol's end
2675 	 * address directly.
2676 	 */
2677 	next = annotation__next_asm_line(notes, dl);
2678 	if (next == NULL)
2679 		addr = ms->sym->end + offset;
2680 	else
2681 		addr = ip + (next->al.offset - dl->al.offset) + offset;
2682 
2683 	return map__rip_2objdump(ms->map, addr);
2684 }
2685 
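/*
 * Cache the debuginfo of the most recently used DSO so that consecutive
 * lookups in the same binary do not reopen it.  Only valid in
 * single-threaded code, see hist_entry__get_data_type().
 */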
2686 static struct debuginfo_cache {
2687 	struct dso *dso;
2688 	struct debuginfo *dbg;
2689 } di_cache;
2690 
2691 void debuginfo_cache__delete(void)
2692 {
2693 	dso__put(di_cache.dso);
2694 	di_cache.dso = NULL;
2695 
2696 	debuginfo__delete(di_cache.dbg);
2697 	di_cache.dbg = NULL;
2698 }
2699 
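/*
 * Resolve the data type accessed by the instruction @dl for @he.  Returns
 * a pseudo type for stack operations and the stack canary, NO_TYPE when
 * the lookup failed, or NULL when the caller may retry with the previous
 * (possibly fused) instruction.
 */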
2700 static struct annotated_data_type *
2701 __hist_entry__get_data_type(struct hist_entry *he, struct arch *arch,
2702 			    struct debuginfo *dbg, struct disasm_line *dl,
2703 			    int *type_offset)
2704 {
2705 	struct map_symbol *ms = &he->ms;
2706 	struct annotated_insn_loc loc;
2707 	struct annotated_op_loc *op_loc;
2708 	struct annotated_data_type *mem_type;
2709 	struct annotated_item_stat *istat;
2710 	int i;
2711 
2712 	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
2713 	if (istat == NULL) {
2714 		ann_data_stat.no_insn++;
2715 		return NO_TYPE;
2716 	}
2717 
2718 	if (annotate_get_insn_location(arch, dl, &loc) < 0) {
2719 		ann_data_stat.no_insn_ops++;
2720 		istat->bad++;
2721 		return NO_TYPE;
2722 	}
2723 
2724 	if (is_stack_operation(arch, dl)) {
2725 		istat->good++;
2726 		*type_offset = 0;
2727 		return &stackop_type;
2728 	}
2729 
2730 	for_each_insn_op_loc(&loc, i, op_loc) {
2731 		struct data_loc_info dloc = {
2732 			.arch = arch,
2733 			.thread = he->thread,
2734 			.ms = ms,
2735 			.ip = ms->sym->start + dl->al.offset,
2736 			.cpumode = he->cpumode,
2737 			.op = op_loc,
2738 			.di = dbg,
2739 		};
2740 
2741 		if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
2742 			continue;
2743 
2744 		/* PC-relative addressing */
2745 		if (op_loc->reg1 == DWARF_REG_PC) {
2746 			dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
2747 							    op_loc->offset, dl);
2748 		}
2749 
2750 		/* Per-CPU (this_cpu) access in the kernel - pretend PC-relative addressing */
2751 		if (dso__kernel(map__dso(ms->map)) && arch__is(arch, "x86") &&
2752 		    op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
2753 			dloc.var_addr = op_loc->offset;
2754 			op_loc->reg1 = DWARF_REG_PC;
2755 		}
2756 
2757 		mem_type = find_data_type(&dloc);
2758 
2759 		if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
2760 			istat->good++;
2761 			*type_offset = 0;
2762 			return &canary_type;
2763 		}
2764 
2765 		if (mem_type)
2766 			istat->good++;
2767 		else
2768 			istat->bad++;
2769 
2770 		if (symbol_conf.annotate_data_sample) {
2771 			struct evsel *evsel = hists_to_evsel(he->hists);
2772 
2773 			annotated_data_type__update_samples(mem_type, evsel,
2774 							    dloc.type_offset,
2775 							    he->stat.nr_events,
2776 							    he->stat.period);
2777 		}
2778 		*type_offset = dloc.type_offset;
2779 		return mem_type ?: NO_TYPE;
2780 	}
2781 
2782 	/* retry with a fused instruction */
2783 	return NULL;
2784 }
2785 
2786 /**
2787  * hist_entry__get_data_type - find data type for given hist entry
2788  * @he: hist entry
2789  *
2790  * This function first annotates the instruction at @he->ip and extracts
2791  * register and offset info from it.  Then it searches the DWARF debug
2792  * info to get a variable and type information using the address, register,
2793  * and offset.
2794  */
2795 struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
2796 {
2797 	struct map_symbol *ms = &he->ms;
2798 	struct evsel *evsel = hists_to_evsel(he->hists);
2799 	struct arch *arch;
2800 	struct disasm_line *dl;
2801 	struct annotated_data_type *mem_type;
2802 	struct annotated_item_stat *istat;
2803 	u64 ip = he->ip;
2804 
2805 	ann_data_stat.total++;
2806 
2807 	if (ms->map == NULL || ms->sym == NULL) {
2808 		ann_data_stat.no_sym++;
2809 		return NULL;
2810 	}
2811 
2812 	if (!symbol_conf.init_annotation) {
2813 		ann_data_stat.no_sym++;
2814 		return NULL;
2815 	}
2816 
2817 	/*
2818 	 * di_cache holds a pair of values, but code below assumes
2819 	 * di_cache.dso can be compared/updated and di_cache.dbg can be
2820 	 * read/updated independently from each other. That assumption only
2821 	 * holds in single-threaded code.
2822 	 */
2823 	assert(perf_singlethreaded);
2824 
2825 	if (map__dso(ms->map) != di_cache.dso) {
2826 		dso__put(di_cache.dso);
2827 		di_cache.dso = dso__get(map__dso(ms->map));
2828 
2829 		debuginfo__delete(di_cache.dbg);
2830 		di_cache.dbg = debuginfo__new(dso__long_name(di_cache.dso));
2831 	}
2832 
2833 	if (di_cache.dbg == NULL) {
2834 		ann_data_stat.no_dbginfo++;
2835 		return NULL;
2836 	}
2837 
2838 	/* Make sure it has the disasm of the function */
2839 	if (symbol__annotate(ms, evsel, &arch) < 0) {
2840 		ann_data_stat.no_insn++;
2841 		return NULL;
2842 	}
2843 
2844 	/*
2845 	 * Get a disasm_line to extract the location from the insn.
2846 	 * This is too slow...
2847 	 */
2848 	dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
2849 	if (dl == NULL) {
2850 		ann_data_stat.no_insn++;
2851 		return NULL;
2852 	}
2853 
2854 retry:
2855 	mem_type = __hist_entry__get_data_type(he, arch, di_cache.dbg, dl,
2856 					       &he->mem_type_off);
2857 	if (mem_type)
2858 		return mem_type == NO_TYPE ? NULL : mem_type;
2859 
2860 	/*
2861 	 * Some instructions can be fused and the actual memory access came
2862 	 * from the previous instruction.
2863 	 */
2864 	if (dl->al.offset > 0) {
2865 		struct annotation *notes;
2866 		struct disasm_line *prev_dl;
2867 
2868 		notes = symbol__annotation(ms->sym);
2869 		prev_dl = annotation__prev_asm_line(notes, dl);
2870 
2871 		if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
2872 			dl = prev_dl;
2873 			goto retry;
2874 		}
2875 	}
2876 
2877 	ann_data_stat.no_mem_ops++;
2878 	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
2879 	if (istat)
2880 		istat->bad++;
2881 	return NULL;
2882 }
2883 
2884 /* Basic block traversal (BFS) data structure */
2885 struct basic_block_data {
2886 	struct list_head queue;
2887 	struct list_head visited;
2888 };
2889 
2890 /*
2891  * During the traversal, it needs to know the parent block the current
2892  * block started from.  Note that a single basic block can be the parent
2893  * of two child basic blocks (in case of a conditional jump).
2894  */
2895 struct basic_block_link {
2896 	struct list_head node;
2897 	struct basic_block_link *parent;
2898 	struct annotated_basic_block *bb;
2899 };
2900 
2901 /* Check whether any basic block in the list already contains the offset */
2902 static bool basic_block_has_offset(struct list_head *head, s64 offset)
2903 {
2904 	struct basic_block_link *link;
2905 
2906 	list_for_each_entry(link, head, node) {
2907 		s64 begin_offset = link->bb->begin->al.offset;
2908 		s64 end_offset = link->bb->end->al.offset;
2909 
2910 		if (begin_offset <= offset && offset <= end_offset)
2911 			return true;
2912 	}
2913 	return false;
2914 }
2915 
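/*
 * A block starting at @dl is new only if its offset is in neither the
 * queue nor the visited list.
 */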
2916 static bool is_new_basic_block(struct basic_block_data *bb_data,
2917 			       struct disasm_line *dl)
2918 {
2919 	s64 offset = dl->al.offset;
2920 
2921 	if (basic_block_has_offset(&bb_data->visited, offset))
2922 		return false;
2923 	if (basic_block_has_offset(&bb_data->queue, offset))
2924 		return false;
2925 	return true;
2926 }
2927 
2928 /* Add a basic block starting from dl and link it to the parent */
2929 static int add_basic_block(struct basic_block_data *bb_data,
2930 			   struct basic_block_link *parent,
2931 			   struct disasm_line *dl)
2932 {
2933 	struct annotated_basic_block *bb;
2934 	struct basic_block_link *link;
2935 
2936 	if (dl == NULL)
2937 		return -1;
2938 
2939 	if (!is_new_basic_block(bb_data, dl))
2940 		return 0;
2941 
2942 	bb = zalloc(sizeof(*bb));
2943 	if (bb == NULL)
2944 		return -1;
2945 
2946 	bb->begin = dl;
2947 	bb->end = dl;
2948 	INIT_LIST_HEAD(&bb->list);
2949 
2950 	link = malloc(sizeof(*link));
2951 	if (link == NULL) {
2952 		free(bb);
2953 		return -1;
2954 	}
2955 
2956 	link->bb = bb;
2957 	link->parent = parent;
2958 	list_add_tail(&link->node, &bb_data->queue);
2959 	return 0;
2960 }
2961 
2962 /* Returns true when it finds the target in the current basic block */
2963 static bool process_basic_block(struct basic_block_data *bb_data,
2964 				struct basic_block_link *link,
2965 				struct symbol *sym, u64 target)
2966 {
2967 	struct disasm_line *dl, *next_dl, *last_dl;
2968 	struct annotation *notes = symbol__annotation(sym);
2969 	bool found = false;
2970 
2971 	dl = link->bb->begin;
2972 	/* Check if it's already visited */
2973 	if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
2974 		return false;
2975 
2976 	last_dl = list_last_entry(&notes->src->source,
2977 				  struct disasm_line, al.node);
2978 	if (last_dl->al.offset == -1)
2979 		last_dl = annotation__prev_asm_line(notes, last_dl);
2980 
2981 	if (last_dl == NULL)
2982 		return false;
2983 
2984 	list_for_each_entry_from(dl, &notes->src->source, al.node) {
2985 		/* Skip comment or debug info line */
2986 		if (dl->al.offset == -1)
2987 			continue;
2988 		/* Found the target instruction */
2989 		if (sym->start + dl->al.offset == target) {
2990 			found = true;
2991 			break;
2992 		}
2993 		/* End of the function, finish the block */
2994 		if (dl == last_dl)
2995 			break;
2996 		/* 'return' instruction finishes the block */
2997 		if (ins__is_ret(&dl->ins))
2998 			break;
2999 		/* normal instructions are part of the basic block */
3000 		if (!ins__is_jump(&dl->ins))
3001 			continue;
3002 		/* jump to a different function, tail call or return */
3003 		if (dl->ops.target.outside)
3004 			break;
3005 		/* jump instruction creates new basic block(s) */
3006 		next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
3007 					   /*allow_update=*/false);
3008 		if (next_dl)
3009 			add_basic_block(bb_data, link, next_dl);
3010 
3011 		/*
3012 		 * FIXME: determine conditional jumps properly.
3013 		 * Conditional jumps create another basic block with the
3014 		 * next disasm line.
3015 		 */
3016 		if (!strstr(dl->ins.name, "jmp")) {
3017 			next_dl = annotation__next_asm_line(notes, dl);
3018 			if (next_dl)
3019 				add_basic_block(bb_data, link, next_dl);
3020 		}
3021 		break;
3022 
3023 	}
3024 	link->bb->end = dl;
3025 	return found;
3026 }
3027 
3028 /*
3029  * Once the target basic block has been found, build a proper linked list of
3030  * basic blocks by following the parent links back to the start.
3031  */
3032 static void link_found_basic_blocks(struct basic_block_link *link,
3033 				    struct list_head *head)
3034 {
3035 	while (link) {
3036 		struct basic_block_link *parent = link->parent;
3037 
3038 		list_move(&link->bb->list, head);
3039 		list_del(&link->node);
3040 		free(link);
3041 
3042 		link = parent;
3043 	}
3044 }
3045 
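/* Free all links and basic blocks left on the queue and visited lists. */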
3046 static void delete_basic_blocks(struct basic_block_data *bb_data)
3047 {
3048 	struct basic_block_link *link, *tmp;
3049 
3050 	list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
3051 		list_del(&link->node);
3052 		zfree(&link->bb);
3053 		free(link);
3054 	}
3055 
3056 	list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
3057 		list_del(&link->node);
3058 		zfree(&link->bb);
3059 		free(link);
3060 	}
3061 }
3062 
3063 /**
3064  * annotate_get_basic_blocks - Get basic blocks for given address range
3065  * @sym: symbol to annotate
3066  * @src: source address
3067  * @dst: destination address
3068  * @head: list head to save basic blocks
3069  *
3070  * This function traverses disasm_lines from @src to @dst and saves them in a
3071  * list of annotated_basic_block on @head.  It uses BFS to find the shortest
3072  * path between the two.  The basic_block_link maintains parent links so
3073  * that it can build the list of blocks from the start.
3074  */
3075 int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
3076 			      struct list_head *head)
3077 {
3078 	struct basic_block_data bb_data = {
3079 		.queue = LIST_HEAD_INIT(bb_data.queue),
3080 		.visited = LIST_HEAD_INIT(bb_data.visited),
3081 	};
3082 	struct basic_block_link *link;
3083 	struct disasm_line *dl;
3084 	int ret = -1;
3085 
3086 	dl = find_disasm_line(sym, src, /*allow_update=*/false);
3087 	if (dl == NULL)
3088 		return -1;
3089 
3090 	if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0)
3091 		return -1;
3092 
3093 	/* Find shortest path from src to dst using BFS */
3094 	while (!list_empty(&bb_data.queue)) {
3095 		link = list_first_entry(&bb_data.queue, struct basic_block_link, node);
3096 
3097 		if (process_basic_block(&bb_data, link, sym, dst)) {
3098 			link_found_basic_blocks(link, head);
3099 			ret = 0;
3100 			break;
3101 		}
3102 		list_move(&link->node, &bb_data.visited);
3103 	}
3104 	delete_basic_blocks(&bb_data);
3105 	return ret;
3106 }
3107