xref: /linux/tools/perf/util/annotate.c (revision 230a7a71f92212e723fa435d4ca5922de33ec88a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4  *
5  * Parts came from builtin-annotate.c, see that file for further
6  * copyright notes.
7  */
8 
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <libgen.h>
12 #include <stdlib.h>
13 #include "util.h" // hex_width()
14 #include "ui/ui.h"
15 #include "sort.h"
16 #include "build-id.h"
17 #include "color.h"
18 #include "config.h"
19 #include "disasm.h"
20 #include "dso.h"
21 #include "env.h"
22 #include "map.h"
23 #include "maps.h"
24 #include "symbol.h"
25 #include "srcline.h"
26 #include "units.h"
27 #include "debug.h"
28 #include "annotate.h"
29 #include "annotate-data.h"
30 #include "evsel.h"
31 #include "evlist.h"
32 #include "bpf-event.h"
33 #include "bpf-utils.h"
34 #include "block-range.h"
35 #include "string2.h"
36 #include "dwarf-regs.h"
37 #include "util/event.h"
38 #include "util/sharded_mutex.h"
39 #include "arch/common.h"
40 #include "namespaces.h"
41 #include "thread.h"
42 #include "hashmap.h"
43 #include <regex.h>
44 #include <linux/bitops.h>
45 #include <linux/kernel.h>
46 #include <linux/string.h>
47 #include <linux/zalloc.h>
48 #include <subcmd/parse-options.h>
49 #include <subcmd/run-command.h>
50 
51 /* FIXME: For the HE_COLORSET */
52 #include "ui/browser.h"
53 
54 /*
55  * FIXME: Using the same values as slang.h,
56  * but that header may not be available everywhere
57  */
58 #define LARROW_CHAR	((unsigned char)',')
59 #define RARROW_CHAR	((unsigned char)'+')
60 #define DARROW_CHAR	((unsigned char)'.')
61 #define UARROW_CHAR	((unsigned char)'-')
62 
63 #include <linux/ctype.h>
64 
65 /* global annotation options */
66 struct annotation_options annotate_opts;
67 
68 /* Data type collection debug statistics */
69 struct annotated_data_stat ann_data_stat;
70 LIST_HEAD(ann_insn_stat);
71 
72 /* Pseudo data types */
73 struct annotated_data_type stackop_type = {
74 	.self = {
75 		.type_name = (char *)"(stack operation)",
76 		.children = LIST_HEAD_INIT(stackop_type.self.children),
77 	},
78 };
79 
80 struct annotated_data_type canary_type = {
81 	.self = {
82 		.type_name = (char *)"(stack canary)",
83 		.children = LIST_HEAD_INIT(canary_type.self.children),
84 	},
85 };
86 
87 /* symbol histogram: key = offset << 16 | evsel->core.idx */
88 static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
89 {
90 	return (key >> 16) + (key & 0xffff);
91 }
92 
93 static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
94 {
95 	return key1 == key2;
96 }
97 
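/*
 * A minimal illustrative sketch (these helpers are hypothetical and not
 * used anywhere): how a sample is keyed in src->samples.  The packing
 * mirrors what __symbol__inc_addr_samples() does inline with
 * "offset << 16 | evidx", which sym_hist_hash()/sym_hist_equal() above
 * rely on.
 */
static __maybe_unused long sym_hist_key__pack(u64 offset, int evidx)
{
	/* low 16 bits: event index, upper bits: offset within the symbol */
	return (long)((offset << 16) | (unsigned int)evidx);
}

static __maybe_unused void sym_hist_key__unpack(long key, u64 *offset, int *evidx)
{
	*offset = (u64)key >> 16;
	*evidx  = key & 0xffff;
}
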
98 static struct annotated_source *annotated_source__new(void)
99 {
100 	struct annotated_source *src = zalloc(sizeof(*src));
101 
102 	if (src != NULL)
103 		INIT_LIST_HEAD(&src->source);
104 
105 	return src;
106 }
107 
108 static __maybe_unused void annotated_source__delete(struct annotated_source *src)
109 {
110 	struct hashmap_entry *cur;
111 	size_t bkt;
112 
113 	if (src == NULL)
114 		return;
115 
116 	hashmap__for_each_entry(src->samples, cur, bkt)
117 		zfree(&cur->pvalue);
118 
119 	hashmap__free(src->samples);
120 	zfree(&src->histograms);
121 	free(src);
122 }
123 
124 static int annotated_source__alloc_histograms(struct annotated_source *src,
125 					      int nr_hists)
126 {
127 	src->nr_histograms   = nr_hists;
128 	src->histograms	     = calloc(nr_hists, sizeof(*src->histograms));
129 
130 	if (src->histograms == NULL)
131 		return -1;
132 
133 	src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
134 	if (src->samples == NULL)
135 		zfree(&src->histograms);
136 
137 	return src->histograms ? 0 : -1;
138 }
139 
140 void symbol__annotate_zero_histograms(struct symbol *sym)
141 {
142 	struct annotation *notes = symbol__annotation(sym);
143 
144 	annotation__lock(notes);
145 	if (notes->src != NULL) {
146 		memset(notes->src->histograms, 0,
147 		       notes->src->nr_histograms * sizeof(*notes->src->histograms));
148 		hashmap__clear(notes->src->samples);
149 	}
150 	if (notes->branch && notes->branch->cycles_hist) {
151 		memset(notes->branch->cycles_hist, 0,
152 		       symbol__size(sym) * sizeof(struct cyc_hist));
153 	}
154 	annotation__unlock(notes);
155 }
156 
157 static int __symbol__account_cycles(struct cyc_hist *ch,
158 				    u64 start,
159 				    unsigned offset, unsigned cycles,
160 				    unsigned have_start)
161 {
162 	/*
163 	 * For now we can only account one basic block per
164 	 * final jump, but multiple blocks could overlap.
165 	 * Always account the longest one, so when a shorter
166 	 * one has already been seen, throw it away.
167 	 *
168 	 * The full cycles are always accounted separately.
169 	 */
170 	ch[offset].num_aggr++;
171 	ch[offset].cycles_aggr += cycles;
172 
173 	if (cycles > ch[offset].cycles_max)
174 		ch[offset].cycles_max = cycles;
175 
176 	if (ch[offset].cycles_min) {
177 		if (cycles && cycles < ch[offset].cycles_min)
178 			ch[offset].cycles_min = cycles;
179 	} else
180 		ch[offset].cycles_min = cycles;
181 
182 	if (!have_start && ch[offset].have_start)
183 		return 0;
184 	if (ch[offset].num) {
185 		if (have_start && (!ch[offset].have_start ||
186 				   ch[offset].start > start)) {
187 			ch[offset].have_start = 0;
188 			ch[offset].cycles = 0;
189 			ch[offset].num = 0;
190 			if (ch[offset].reset < 0xffff)
191 				ch[offset].reset++;
192 		} else if (have_start &&
193 			   ch[offset].start < start)
194 			return 0;
195 	}
196 
197 	if (ch[offset].num < NUM_SPARKS)
198 		ch[offset].cycles_spark[ch[offset].num] = cycles;
199 
200 	ch[offset].have_start = have_start;
201 	ch[offset].start = start;
202 	ch[offset].cycles += cycles;
203 	ch[offset].num++;
204 	return 0;
205 }
206 
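/*
 * Minimal usage sketch (hypothetical, assuming @ch is freshly
 * zero-allocated and large enough): two overlapping basic blocks ending
 * at the same offset.  The longer block (smaller start) resets and
 * replaces the shorter one for the IPC computation, while
 * num_aggr/cycles_aggr keep accumulating every sample.
 */
static __maybe_unused void cycles_accounting_example(struct cyc_hist *ch)
{
	/* shorter block [0x30, 0x40), 12 cycles */
	__symbol__account_cycles(ch, 0x30, 0x40, 12, /*have_start=*/1);
	/* longer block [0x10, 0x40), 50 cycles: wins over the shorter one */
	__symbol__account_cycles(ch, 0x10, 0x40, 50, /*have_start=*/1);

	/*
	 * Now ch[0x40].start == 0x10 and ch[0x40].num == 1 (the shorter
	 * block was thrown away), while ch[0x40].num_aggr == 2 and
	 * ch[0x40].cycles_aggr == 62 still cover both samples.
	 */
}
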
207 static int __symbol__inc_addr_samples(struct map_symbol *ms,
208 				      struct annotated_source *src, int evidx, u64 addr,
209 				      struct perf_sample *sample)
210 {
211 	struct symbol *sym = ms->sym;
212 	long hash_key;
213 	u64 offset;
214 	struct sym_hist *h;
215 	struct sym_hist_entry *entry;
216 
217 	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
218 
219 	if ((addr < sym->start || addr >= sym->end) &&
220 	    (addr != sym->end || sym->start != sym->end)) {
221 		pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
222 		       __func__, __LINE__, sym->name, sym->start, addr, sym->end);
223 		return -ERANGE;
224 	}
225 
226 	offset = addr - sym->start;
227 	h = annotated_source__histogram(src, evidx);
228 	if (h == NULL) {
229 		pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
230 			 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
231 		return -ENOMEM;
232 	}
233 
234 	hash_key = offset << 16 | evidx;
235 	if (!hashmap__find(src->samples, hash_key, &entry)) {
236 		entry = zalloc(sizeof(*entry));
237 		if (entry == NULL)
238 			return -ENOMEM;
239 
240 		if (hashmap__add(src->samples, hash_key, entry) < 0)
241 			return -ENOMEM;
242 	}
243 
244 	h->nr_samples++;
245 	h->period += sample->period;
246 	entry->nr_samples++;
247 	entry->period += sample->period;
248 
249 	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
250 		  ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
251 		  sym->start, sym->name, addr, addr - sym->start, evidx,
252 		  entry->nr_samples, entry->period);
253 	return 0;
254 }
255 
256 struct annotated_branch *annotation__get_branch(struct annotation *notes)
257 {
258 	if (notes == NULL)
259 		return NULL;
260 
261 	if (notes->branch == NULL)
262 		notes->branch = zalloc(sizeof(*notes->branch));
263 
264 	return notes->branch;
265 }
266 
267 static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
268 {
269 	struct annotation *notes = symbol__annotation(sym);
270 	struct annotated_branch *branch;
271 
272 	branch = annotation__get_branch(notes);
273 	if (branch == NULL)
274 		return NULL;
275 
276 	if (branch->cycles_hist == NULL) {
277 		const size_t size = symbol__size(sym);
278 
279 		branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
280 	}
281 
282 	return branch->cycles_hist;
283 }
284 
285 struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
286 {
287 	struct annotation *notes = symbol__annotation(sym);
288 
289 	if (notes->src == NULL) {
290 		notes->src = annotated_source__new();
291 		if (notes->src == NULL)
292 			return NULL;
293 		goto alloc_histograms;
294 	}
295 
296 	if (notes->src->histograms == NULL) {
297 alloc_histograms:
298 		annotated_source__alloc_histograms(notes->src, nr_hists);
299 	}
300 
301 	return notes->src;
302 }
303 
304 static int symbol__inc_addr_samples(struct map_symbol *ms,
305 				    struct evsel *evsel, u64 addr,
306 				    struct perf_sample *sample)
307 {
308 	struct symbol *sym = ms->sym;
309 	struct annotated_source *src;
310 
311 	if (sym == NULL)
312 		return 0;
313 	src = symbol__hists(sym, evsel->evlist->core.nr_entries);
314 	return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
315 }
316 
317 static int symbol__account_cycles(u64 addr, u64 start,
318 				  struct symbol *sym, unsigned cycles)
319 {
320 	struct cyc_hist *cycles_hist;
321 	unsigned offset;
322 
323 	if (sym == NULL)
324 		return 0;
325 	cycles_hist = symbol__cycles_hist(sym);
326 	if (cycles_hist == NULL)
327 		return -ENOMEM;
328 	if (addr < sym->start || addr >= sym->end)
329 		return -ERANGE;
330 
331 	if (start) {
332 		if (start < sym->start || start >= sym->end)
333 			return -ERANGE;
334 		if (start >= addr)
335 			start = 0;
336 	}
337 	offset = addr - sym->start;
338 	return __symbol__account_cycles(cycles_hist,
339 					start ? start - sym->start : 0,
340 					offset, cycles,
341 					!!start);
342 }
343 
344 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
345 				    struct addr_map_symbol *start,
346 				    unsigned cycles)
347 {
348 	u64 saddr = 0;
349 	int err;
350 
351 	if (!cycles)
352 		return 0;
353 
354 	/*
355 	 * Only set start when IPC can be computed, i.e. when the
356 	 * basic block lies completely within a single function.
357 	 * As a special case, also accept a block whose jump comes
358 	 * from another function but which starts exactly at the
359 	 * function start.
360 	 */
361 	if (start &&
362 		(start->ms.sym == ams->ms.sym ||
363 		 (ams->ms.sym &&
364 		  start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
365 		saddr = start->al_addr;
366 	if (saddr == 0)
367 		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
368 			ams->addr,
369 			start ? start->addr : 0,
370 			ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
371 			saddr);
372 	err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles);
373 	if (err)
374 		pr_debug2("account_cycles failed %d\n", err);
375 	return err;
376 }
377 
378 struct annotation_line *annotated_source__get_line(struct annotated_source *src,
379 						   s64 offset)
380 {
381 	struct annotation_line *al;
382 
383 	list_for_each_entry(al, &src->source, node) {
384 		if (al->offset == offset)
385 			return al;
386 	}
387 	return NULL;
388 }
389 
390 static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
391 {
392 	struct annotation_line *al;
393 	unsigned n_insn = 0;
394 
395 	al = annotated_source__get_line(notes->src, start);
396 	if (al == NULL)
397 		return 0;
398 
399 	list_for_each_entry_from(al, &notes->src->source, node) {
400 		if (al->offset == -1)
401 			continue;
402 		if ((u64)al->offset > end)
403 			break;
404 		n_insn++;
405 	}
406 	return n_insn;
407 }
408 
409 static void annotated_branch__delete(struct annotated_branch *branch)
410 {
411 	if (branch) {
412 		zfree(&branch->cycles_hist);
413 		free(branch);
414 	}
415 }
416 
417 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
418 {
419 	unsigned n_insn;
420 	unsigned int cover_insn = 0;
421 
422 	n_insn = annotation__count_insn(notes, start, end);
423 	if (n_insn && ch->num && ch->cycles) {
424 		struct annotation_line *al;
425 		struct annotated_branch *branch;
426 		float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
427 
428 		/* Hide data when there are too many overlaps. */
429 		if (ch->reset >= 0x7fff)
430 			return;
431 
432 		al = annotated_source__get_line(notes->src, start);
433 		if (al == NULL)
434 			return;
435 
436 		list_for_each_entry_from(al, &notes->src->source, node) {
437 			if (al->offset == -1)
438 				continue;
439 			if ((u64)al->offset > end)
440 				break;
441 			if (al->cycles && al->cycles->ipc == 0.0) {
442 				al->cycles->ipc = ipc;
443 				cover_insn++;
444 			}
445 		}
446 
447 		branch = annotation__get_branch(notes);
448 		if (cover_insn && branch) {
449 			branch->hit_cycles += ch->cycles;
450 			branch->hit_insn += n_insn * ch->num;
451 			branch->cover_insn += cover_insn;
452 		}
453 	}
454 }
455 
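/*
 * Worked example for the IPC value filled in above (numbers are made
 * up): a basic block covering n_insn = 4 instructions was hit
 * ch->num = 5 times for a total of ch->cycles = 40 cycles, i.e.
 * 40 / 5 = 8 cycles per traversal on average, so
 *
 *	ipc = n_insn / (cycles / num) = 4 / 8 = 0.5
 *
 * meaning half an instruction retired per cycle in this block.
 */
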
456 static int annotation__compute_ipc(struct annotation *notes, size_t size)
457 {
458 	int err = 0;
459 	s64 offset;
460 
461 	if (!notes->branch || !notes->branch->cycles_hist)
462 		return 0;
463 
464 	notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
465 	notes->branch->hit_cycles = 0;
466 	notes->branch->hit_insn = 0;
467 	notes->branch->cover_insn = 0;
468 
469 	annotation__lock(notes);
470 	for (offset = size - 1; offset >= 0; --offset) {
471 		struct cyc_hist *ch;
472 
473 		ch = &notes->branch->cycles_hist[offset];
474 		if (ch && ch->cycles) {
475 			struct annotation_line *al;
476 
477 			al = annotated_source__get_line(notes->src, offset);
478 			if (al && al->cycles == NULL) {
479 				al->cycles = zalloc(sizeof(*al->cycles));
480 				if (al->cycles == NULL) {
481 					err = ENOMEM;
482 					break;
483 				}
484 			}
485 			if (ch->have_start)
486 				annotation__count_and_fill(notes, ch->start, offset, ch);
487 			if (al && ch->num_aggr) {
488 				al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
489 				al->cycles->max = ch->cycles_max;
490 				al->cycles->min = ch->cycles_min;
491 			}
492 		}
493 	}
494 
495 	if (err) {
496 		while (++offset < (s64)size) {
497 			struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
498 
499 			if (ch && ch->cycles) {
500 				struct annotation_line *al;
501 
502 				al = annotated_source__get_line(notes->src, offset);
503 				if (al)
504 					zfree(&al->cycles);
505 			}
506 		}
507 	}
508 
509 	annotation__unlock(notes);
510 	return 0;
511 }
512 
513 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
514 				 struct evsel *evsel)
515 {
516 	return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
517 }
518 
519 int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
520 				 struct evsel *evsel, u64 ip)
521 {
522 	return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
523 }
524 
525 
526 void annotation__exit(struct annotation *notes)
527 {
528 	annotated_source__delete(notes->src);
529 	annotated_branch__delete(notes->branch);
530 }
531 
532 static struct sharded_mutex *sharded_mutex;
533 
534 static void annotation__init_sharded_mutex(void)
535 {
536 	/* As many mutexes as there are CPUs. */
537 	sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
538 }
539 
540 static size_t annotation__hash(const struct annotation *notes)
541 {
542 	return (size_t)notes;
543 }
544 
545 static struct mutex *annotation__get_mutex(const struct annotation *notes)
546 {
547 	static pthread_once_t once = PTHREAD_ONCE_INIT;
548 
549 	pthread_once(&once, annotation__init_sharded_mutex);
550 	if (!sharded_mutex)
551 		return NULL;
552 
553 	return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
554 }
555 
556 void annotation__lock(struct annotation *notes)
557 	NO_THREAD_SAFETY_ANALYSIS
558 {
559 	struct mutex *mutex = annotation__get_mutex(notes);
560 
561 	if (mutex)
562 		mutex_lock(mutex);
563 }
564 
565 void annotation__unlock(struct annotation *notes)
566 	NO_THREAD_SAFETY_ANALYSIS
567 {
568 	struct mutex *mutex = annotation__get_mutex(notes);
569 
570 	if (mutex)
571 		mutex_unlock(mutex);
572 }
573 
574 bool annotation__trylock(struct annotation *notes)
575 {
576 	struct mutex *mutex = annotation__get_mutex(notes);
577 
578 	if (!mutex)
579 		return false;
580 
581 	return mutex_trylock(mutex);
582 }
583 
584 void annotation_line__add(struct annotation_line *al, struct list_head *head)
585 {
586 	list_add_tail(&al->node, head);
587 }
588 
589 struct annotation_line *
590 annotation_line__next(struct annotation_line *pos, struct list_head *head)
591 {
592 	list_for_each_entry_continue(pos, head, node)
593 		if (pos->offset >= 0)
594 			return pos;
595 
596 	return NULL;
597 }
598 
599 static const char *annotate__address_color(struct block_range *br)
600 {
601 	double cov = block_range__coverage(br);
602 
603 	if (cov >= 0) {
604 		/* mark red for >75% coverage */
605 		if (cov > 0.75)
606 			return PERF_COLOR_RED;
607 
608 		/* mark dull for <1% coverage */
609 		if (cov < 0.01)
610 			return PERF_COLOR_NORMAL;
611 	}
612 
613 	return PERF_COLOR_MAGENTA;
614 }
615 
616 static const char *annotate__asm_color(struct block_range *br)
617 {
618 	double cov = block_range__coverage(br);
619 
620 	if (cov >= 0) {
621 		/* mark dull for <1% coverage */
622 		if (cov < 0.01)
623 			return PERF_COLOR_NORMAL;
624 	}
625 
626 	return PERF_COLOR_BLUE;
627 }
628 
629 static void annotate__branch_printf(struct block_range *br, u64 addr)
630 {
631 	bool emit_comment = true;
632 
633 	if (!br)
634 		return;
635 
636 #if 1
637 	if (br->is_target && br->start == addr) {
638 		struct block_range *branch = br;
639 		double p;
640 
641 		/*
642 		 * Find matching branch to our target.
643 		 */
644 		while (!branch->is_branch)
645 			branch = block_range__next(branch);
646 
647 		p = 100 *(double)br->entry / branch->coverage;
648 
649 		if (p > 0.1) {
650 			if (emit_comment) {
651 				emit_comment = false;
652 				printf("\t#");
653 			}
654 
655 			/*
656 			 * The percentage of coverage joined at this target in relation
657 			 * to the next branch.
658 			 */
659 			printf(" +%.2f%%", p);
660 		}
661 	}
662 #endif
663 	if (br->is_branch && br->end == addr) {
664 		double p = 100*(double)br->taken / br->coverage;
665 
666 		if (p > 0.1) {
667 			if (emit_comment) {
668 				emit_comment = false;
669 				printf("\t#");
670 			}
671 
672 			/*
673 			 * The percentage of coverage leaving at this branch, and
674 			 * its prediction ratio.
675 			 */
676 			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred  / br->taken);
677 		}
678 	}
679 }
680 
681 static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
682 {
683 	s64 offset = dl->al.offset;
684 	const u64 addr = start + offset;
685 	struct block_range *br;
686 
687 	br = block_range__find(addr);
688 	color_fprintf(stdout, annotate__address_color(br), "  %*" PRIx64 ":", addr_fmt_width, addr);
689 	color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
690 	annotate__branch_printf(br, addr);
691 	return 0;
692 }
693 
694 static int
695 annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
696 		       struct evsel *evsel, u64 len, int min_pcnt, int printed,
697 		       int max_lines, struct annotation_line *queue, int addr_fmt_width,
698 		       int percent_type)
699 {
700 	struct disasm_line *dl = container_of(al, struct disasm_line, al);
701 	static const char *prev_line;
702 
703 	if (al->offset != -1) {
704 		double max_percent = 0.0;
705 		int i, nr_percent = 1;
706 		const char *color;
707 		struct annotation *notes = symbol__annotation(sym);
708 
709 		for (i = 0; i < al->data_nr; i++) {
710 			double percent;
711 
712 			percent = annotation_data__percent(&al->data[i],
713 							   percent_type);
714 
715 			if (percent > max_percent)
716 				max_percent = percent;
717 		}
718 
719 		if (al->data_nr > nr_percent)
720 			nr_percent = al->data_nr;
721 
722 		if (max_percent < min_pcnt)
723 			return -1;
724 
725 		if (max_lines && printed >= max_lines)
726 			return 1;
727 
728 		if (queue != NULL) {
729 			list_for_each_entry_from(queue, &notes->src->source, node) {
730 				if (queue == al)
731 					break;
732 				annotation_line__print(queue, sym, start, evsel, len,
733 						       0, 0, 1, NULL, addr_fmt_width,
734 						       percent_type);
735 			}
736 		}
737 
738 		color = get_percent_color(max_percent);
739 
740 		for (i = 0; i < nr_percent; i++) {
741 			struct annotation_data *data = &al->data[i];
742 			double percent;
743 
744 			percent = annotation_data__percent(data, percent_type);
745 			color = get_percent_color(percent);
746 
747 			if (symbol_conf.show_total_period)
748 				color_fprintf(stdout, color, " %11" PRIu64,
749 					      data->he.period);
750 			else if (symbol_conf.show_nr_samples)
751 				color_fprintf(stdout, color, " %7" PRIu64,
752 					      data->he.nr_samples);
753 			else
754 				color_fprintf(stdout, color, " %7.2f", percent);
755 		}
756 
757 		printf(" : ");
758 
759 		disasm_line__print(dl, start, addr_fmt_width);
760 
761 		/*
762 		 * Also color the filename and line number if needed, with
763 		 * the same color as the percentage. Don't print it twice
764 		 * for consecutive addresses sharing the same filename:line.
765 		 */
766 		if (al->path) {
767 			if (!prev_line || strcmp(prev_line, al->path)) {
768 				color_fprintf(stdout, color, " // %s", al->path);
769 				prev_line = al->path;
770 			}
771 		}
772 
773 		printf("\n");
774 	} else if (max_lines && printed >= max_lines)
775 		return 1;
776 	else {
777 		int width = symbol_conf.show_total_period ? 12 : 8;
778 
779 		if (queue)
780 			return -1;
781 
782 		if (evsel__is_group_event(evsel))
783 			width *= evsel->core.nr_members;
784 
785 		if (!*al->line)
786 			printf(" %*s:\n", width, " ");
787 		else
788 			printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
789 	}
790 
791 	return 0;
792 }
793 
794 static void calc_percent(struct annotation *notes,
795 			 struct evsel *evsel,
796 			 struct annotation_data *data,
797 			 s64 offset, s64 end)
798 {
799 	struct hists *hists = evsel__hists(evsel);
800 	int evidx = evsel->core.idx;
801 	struct sym_hist *sym_hist = annotation__histogram(notes, evidx);
802 	unsigned int hits = 0;
803 	u64 period = 0;
804 
805 	while (offset < end) {
806 		struct sym_hist_entry *entry;
807 
808 		entry = annotated_source__hist_entry(notes->src, evidx, offset);
809 		if (entry) {
810 			hits   += entry->nr_samples;
811 			period += entry->period;
812 		}
813 		++offset;
814 	}
815 
816 	if (sym_hist->nr_samples) {
817 		data->he.period     = period;
818 		data->he.nr_samples = hits;
819 		data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
820 	}
821 
822 	if (hists->stats.nr_non_filtered_samples)
823 		data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
824 
825 	if (sym_hist->period)
826 		data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
827 
828 	if (hists->stats.total_period)
829 		data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
830 }
831 
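/*
 * Worked example for the percent types computed above (made-up
 * numbers): the lines in [offset, end) got hits = 25 samples with
 * period = 1000, the whole symbol got sym_hist->nr_samples = 100 and
 * sym_hist->period = 5000, and the (non-filtered) hists totals are 400
 * samples and a period of 20000.  Then:
 *
 *	PERCENT_HITS_LOCAL    = 100.0 *   25 /   100 = 25.00%
 *	PERCENT_HITS_GLOBAL   = 100.0 *   25 /   400 =  6.25%
 *	PERCENT_PERIOD_LOCAL  = 100.0 * 1000 /  5000 = 20.00%
 *	PERCENT_PERIOD_GLOBAL = 100.0 * 1000 / 20000 =  5.00%
 */
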
832 static void annotation__calc_percent(struct annotation *notes,
833 				     struct evsel *leader, s64 len)
834 {
835 	struct annotation_line *al, *next;
836 	struct evsel *evsel;
837 
838 	list_for_each_entry(al, &notes->src->source, node) {
839 		s64 end;
840 		int i = 0;
841 
842 		if (al->offset == -1)
843 			continue;
844 
845 		next = annotation_line__next(al, &notes->src->source);
846 		end  = next ? next->offset : len;
847 
848 		for_each_group_evsel(evsel, leader) {
849 			struct annotation_data *data;
850 
851 			BUG_ON(i >= al->data_nr);
852 
853 			data = &al->data[i++];
854 
855 			calc_percent(notes, evsel, data, al->offset, end);
856 		}
857 	}
858 }
859 
860 void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
861 {
862 	struct annotation *notes = symbol__annotation(sym);
863 
864 	annotation__calc_percent(notes, evsel, symbol__size(sym));
865 }
866 
867 static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
868 {
869 	struct perf_env *env = evsel__env(evsel);
870 	const char *arch_name = perf_env__arch(env);
871 	struct arch *arch;
872 	int err;
873 
874 	if (!arch_name) {
875 		*parch = NULL;
876 		return errno;
877 	}
878 
879 	*parch = arch = arch__find(arch_name);
880 	if (arch == NULL) {
881 		pr_err("%s: unsupported arch %s\n", __func__, arch_name);
882 		return ENOTSUP;
883 	}
884 
885 	if (arch->init) {
886 		err = arch->init(arch, env ? env->cpuid : NULL);
887 		if (err) {
888 			pr_err("%s: failed to initialize %s arch priv area\n",
889 			       __func__, arch->name);
890 			return err;
891 		}
892 	}
893 	return 0;
894 }
895 
896 int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
897 		     struct arch **parch)
898 {
899 	struct symbol *sym = ms->sym;
900 	struct annotation *notes = symbol__annotation(sym);
901 	struct annotate_args args = {
902 		.evsel		= evsel,
903 		.options	= &annotate_opts,
904 	};
905 	struct arch *arch = NULL;
906 	int err;
907 
908 	err = evsel__get_arch(evsel, &arch);
909 	if (err < 0)
910 		return err;
911 
912 	if (parch)
913 		*parch = arch;
914 
915 	if (notes->src && !list_empty(&notes->src->source))
916 		return 0;
917 
918 	args.arch = arch;
919 	args.ms = *ms;
920 
921 	if (notes->src == NULL) {
922 		notes->src = annotated_source__new();
923 		if (notes->src == NULL)
924 			return -1;
925 	}
926 
927 	if (annotate_opts.full_addr)
928 		notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
929 	else
930 		notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
931 
932 	return symbol__disassemble(sym, &args);
933 }
934 
935 static void insert_source_line(struct rb_root *root, struct annotation_line *al)
936 {
937 	struct annotation_line *iter;
938 	struct rb_node **p = &root->rb_node;
939 	struct rb_node *parent = NULL;
940 	unsigned int percent_type = annotate_opts.percent_type;
941 	int i, ret;
942 
943 	while (*p != NULL) {
944 		parent = *p;
945 		iter = rb_entry(parent, struct annotation_line, rb_node);
946 
947 		ret = strcmp(iter->path, al->path);
948 		if (ret == 0) {
949 			for (i = 0; i < al->data_nr; i++) {
950 				iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
951 										      percent_type);
952 			}
953 			return;
954 		}
955 
956 		if (ret < 0)
957 			p = &(*p)->rb_left;
958 		else
959 			p = &(*p)->rb_right;
960 	}
961 
962 	for (i = 0; i < al->data_nr; i++) {
963 		al->data[i].percent_sum = annotation_data__percent(&al->data[i],
964 								   percent_type);
965 	}
966 
967 	rb_link_node(&al->rb_node, parent, p);
968 	rb_insert_color(&al->rb_node, root);
969 }
970 
971 static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
972 {
973 	int i;
974 
975 	for (i = 0; i < a->data_nr; i++) {
976 		if (a->data[i].percent_sum == b->data[i].percent_sum)
977 			continue;
978 		return a->data[i].percent_sum > b->data[i].percent_sum;
979 	}
980 
981 	return 0;
982 }
983 
984 static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
985 {
986 	struct annotation_line *iter;
987 	struct rb_node **p = &root->rb_node;
988 	struct rb_node *parent = NULL;
989 
990 	while (*p != NULL) {
991 		parent = *p;
992 		iter = rb_entry(parent, struct annotation_line, rb_node);
993 
994 		if (cmp_source_line(al, iter))
995 			p = &(*p)->rb_left;
996 		else
997 			p = &(*p)->rb_right;
998 	}
999 
1000 	rb_link_node(&al->rb_node, parent, p);
1001 	rb_insert_color(&al->rb_node, root);
1002 }
1003 
1004 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1005 {
1006 	struct annotation_line *al;
1007 	struct rb_node *node;
1008 
1009 	node = rb_first(src_root);
1010 	while (node) {
1011 		struct rb_node *next;
1012 
1013 		al = rb_entry(node, struct annotation_line, rb_node);
1014 		next = rb_next(node);
1015 		rb_erase(node, src_root);
1016 
1017 		__resort_source_line(dest_root, al);
1018 		node = next;
1019 	}
1020 }
1021 
1022 static void print_summary(struct rb_root *root, const char *filename)
1023 {
1024 	struct annotation_line *al;
1025 	struct rb_node *node;
1026 
1027 	printf("\nSorted summary for file %s\n", filename);
1028 	printf("----------------------------------------------\n\n");
1029 
1030 	if (RB_EMPTY_ROOT(root)) {
1031 		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1032 		return;
1033 	}
1034 
1035 	node = rb_first(root);
1036 	while (node) {
1037 		double percent, percent_max = 0.0;
1038 		const char *color;
1039 		char *path;
1040 		int i;
1041 
1042 		al = rb_entry(node, struct annotation_line, rb_node);
1043 		for (i = 0; i < al->data_nr; i++) {
1044 			percent = al->data[i].percent_sum;
1045 			color = get_percent_color(percent);
1046 			color_fprintf(stdout, color, " %7.2f", percent);
1047 
1048 			if (percent > percent_max)
1049 				percent_max = percent;
1050 		}
1051 
1052 		path = al->path;
1053 		color = get_percent_color(percent_max);
1054 		color_fprintf(stdout, color, " %s\n", path);
1055 
1056 		node = rb_next(node);
1057 	}
1058 }
1059 
1060 static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
1061 {
1062 	int evidx = evsel->core.idx;
1063 	struct annotation *notes = symbol__annotation(sym);
1064 	struct sym_hist *h = annotation__histogram(notes, evidx);
1065 	u64 len = symbol__size(sym), offset;
1066 
1067 	for (offset = 0; offset < len; ++offset) {
1068 		struct sym_hist_entry *entry;
1069 
1070 		entry = annotated_source__hist_entry(notes->src, evidx, offset);
1071 		if (entry && entry->nr_samples != 0)
1072 			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
1073 			       sym->start + offset, entry->nr_samples);
1074 	}
1075 	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
1076 }
1077 
1078 static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
1079 {
1080 	char bf[32];
1081 	struct annotation_line *line;
1082 
1083 	list_for_each_entry_reverse(line, lines, node) {
1084 		if (line->offset != -1)
1085 			return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
1086 	}
1087 
1088 	return 0;
1089 }
1090 
1091 int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
1092 {
1093 	struct map *map = ms->map;
1094 	struct symbol *sym = ms->sym;
1095 	struct dso *dso = map__dso(map);
1096 	char *filename;
1097 	const char *d_filename;
1098 	const char *evsel_name = evsel__name(evsel);
1099 	struct annotation *notes = symbol__annotation(sym);
1100 	struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
1101 	struct annotation_line *pos, *queue = NULL;
1102 	struct annotation_options *opts = &annotate_opts;
1103 	u64 start = map__rip_2objdump(map, sym->start);
1104 	int printed = 2, queue_len = 0, addr_fmt_width;
1105 	int more = 0;
1106 	bool context = opts->context;
1107 	u64 len;
1108 	int width = symbol_conf.show_total_period ? 12 : 8;
1109 	int graph_dotted_len;
1110 	char buf[512];
1111 
1112 	filename = strdup(dso__long_name(dso));
1113 	if (!filename)
1114 		return -ENOMEM;
1115 
1116 	if (opts->full_path)
1117 		d_filename = filename;
1118 	else
1119 		d_filename = basename(filename);
1120 
1121 	len = symbol__size(sym);
1122 
1123 	if (evsel__is_group_event(evsel)) {
1124 		width *= evsel->core.nr_members;
1125 		evsel__group_desc(evsel, buf, sizeof(buf));
1126 		evsel_name = buf;
1127 	}
1128 
1129 	graph_dotted_len = printf(" %-*.*s|	Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
1130 				  "percent: %s)\n",
1131 				  width, width, symbol_conf.show_total_period ? "Period" :
1132 				  symbol_conf.show_nr_samples ? "Samples" : "Percent",
1133 				  d_filename, evsel_name, h->nr_samples,
1134 				  percent_type_str(opts->percent_type));
1135 
1136 	printf("%-*.*s----\n",
1137 	       graph_dotted_len, graph_dotted_len, graph_dotted_line);
1138 
1139 	if (verbose > 0)
1140 		symbol__annotate_hits(sym, evsel);
1141 
1142 	addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
1143 
1144 	list_for_each_entry(pos, &notes->src->source, node) {
1145 		int err;
1146 
1147 		if (context && queue == NULL) {
1148 			queue = pos;
1149 			queue_len = 0;
1150 		}
1151 
1152 		err = annotation_line__print(pos, sym, start, evsel, len,
1153 					     opts->min_pcnt, printed, opts->max_lines,
1154 					     queue, addr_fmt_width, opts->percent_type);
1155 
1156 		switch (err) {
1157 		case 0:
1158 			++printed;
1159 			if (context) {
1160 				printed += queue_len;
1161 				queue = NULL;
1162 				queue_len = 0;
1163 			}
1164 			break;
1165 		case 1:
1166 			/* filtered by max_lines */
1167 			++more;
1168 			break;
1169 		case -1:
1170 		default:
1171 			/*
1172 			 * Filtered by min_pcnt or non-IP lines when
1173 			 * context != 0
1174 			 */
1175 			if (!context)
1176 				break;
1177 			if (queue_len == context)
1178 				queue = list_entry(queue->node.next, typeof(*queue), node);
1179 			else
1180 				++queue_len;
1181 			break;
1182 		}
1183 	}
1184 
1185 	free(filename);
1186 
1187 	return more;
1188 }
1189 
1190 static void FILE__set_percent_color(void *fp __maybe_unused,
1191 				    double percent __maybe_unused,
1192 				    bool current __maybe_unused)
1193 {
1194 }
1195 
1196 static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
1197 					 int nr __maybe_unused, bool current __maybe_unused)
1198 {
1199 	return 0;
1200 }
1201 
1202 static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
1203 {
1204 	return 0;
1205 }
1206 
1207 static void FILE__printf(void *fp, const char *fmt, ...)
1208 {
1209 	va_list args;
1210 
1211 	va_start(args, fmt);
1212 	vfprintf(fp, fmt, args);
1213 	va_end(args);
1214 }
1215 
1216 static void FILE__write_graph(void *fp, int graph)
1217 {
1218 	const char *s;
1219 	switch (graph) {
1220 
1221 	case DARROW_CHAR: s = "↓"; break;
1222 	case UARROW_CHAR: s = "↑"; break;
1223 	case LARROW_CHAR: s = "←"; break;
1224 	case RARROW_CHAR: s = "→"; break;
1225 	default:		s = "?"; break;
1226 	}
1227 
1228 	fputs(s, fp);
1229 }
1230 
1231 static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
1232 {
1233 	struct annotation *notes = symbol__annotation(sym);
1234 	struct annotation_write_ops wops = {
1235 		.first_line		 = true,
1236 		.obj			 = fp,
1237 		.set_color		 = FILE__set_color,
1238 		.set_percent_color	 = FILE__set_percent_color,
1239 		.set_jumps_percent_color = FILE__set_jumps_percent_color,
1240 		.printf			 = FILE__printf,
1241 		.write_graph		 = FILE__write_graph,
1242 	};
1243 	struct annotation_line *al;
1244 
1245 	list_for_each_entry(al, &notes->src->source, node) {
1246 		if (annotation_line__filter(al))
1247 			continue;
1248 		annotation_line__write(al, notes, &wops);
1249 		fputc('\n', fp);
1250 		wops.first_line = false;
1251 	}
1252 
1253 	return 0;
1254 }
1255 
1256 int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
1257 {
1258 	const char *ev_name = evsel__name(evsel);
1259 	char buf[1024];
1260 	char *filename;
1261 	int err = -1;
1262 	FILE *fp;
1263 
1264 	if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
1265 		return -1;
1266 
1267 	fp = fopen(filename, "w");
1268 	if (fp == NULL)
1269 		goto out_free_filename;
1270 
1271 	if (evsel__is_group_event(evsel)) {
1272 		evsel__group_desc(evsel, buf, sizeof(buf));
1273 		ev_name = buf;
1274 	}
1275 
1276 	fprintf(fp, "%s() %s\nEvent: %s\n\n",
1277 		ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
1278 	symbol__annotate_fprintf2(ms->sym, fp);
1279 
1280 	fclose(fp);
1281 	err = 0;
1282 out_free_filename:
1283 	free(filename);
1284 	return err;
1285 }
1286 
1287 void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
1288 {
1289 	struct annotation *notes = symbol__annotation(sym);
1290 	struct sym_hist *h = annotation__histogram(notes, evidx);
1291 
1292 	memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
1293 }
1294 
1295 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
1296 {
1297 	struct annotation *notes = symbol__annotation(sym);
1298 	struct sym_hist *h = annotation__histogram(notes, evidx);
1299 	struct annotation_line *al;
1300 
1301 	h->nr_samples = 0;
1302 	list_for_each_entry(al, &notes->src->source, node) {
1303 		struct sym_hist_entry *entry;
1304 
1305 		if (al->offset == -1)
1306 			continue;
1307 
1308 		entry = annotated_source__hist_entry(notes->src, evidx, al->offset);
1309 		if (entry == NULL)
1310 			continue;
1311 
1312 		entry->nr_samples = entry->nr_samples * 7 / 8;
1313 		h->nr_samples += entry->nr_samples;
1314 	}
1315 }
1316 
1317 void annotated_source__purge(struct annotated_source *as)
1318 {
1319 	struct annotation_line *al, *n;
1320 
1321 	list_for_each_entry_safe(al, n, &as->source, node) {
1322 		list_del_init(&al->node);
1323 		disasm_line__free(disasm_line(al));
1324 	}
1325 }
1326 
1327 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1328 {
1329 	size_t printed;
1330 
1331 	if (dl->al.offset == -1)
1332 		return fprintf(fp, "%s\n", dl->al.line);
1333 
1334 	printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
1335 
1336 	if (dl->ops.raw[0] != '\0') {
1337 		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1338 				   dl->ops.raw);
1339 	}
1340 
1341 	return printed + fprintf(fp, "\n");
1342 }
1343 
1344 size_t disasm__fprintf(struct list_head *head, FILE *fp)
1345 {
1346 	struct disasm_line *pos;
1347 	size_t printed = 0;
1348 
1349 	list_for_each_entry(pos, head, al.node)
1350 		printed += disasm_line__fprintf(pos, fp);
1351 
1352 	return printed;
1353 }
1354 
1355 bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
1356 {
1357 	if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
1358 	    !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
1359 	    dl->ops.target.offset >= (s64)symbol__size(sym))
1360 		return false;
1361 
1362 	return true;
1363 }
1364 
1365 static void
1366 annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
1367 {
1368 	struct annotation_line *al;
1369 
1370 	/* PLT symbols contain external offsets */
1371 	if (strstr(sym->name, "@plt"))
1372 		return;
1373 
1374 	list_for_each_entry(al, &notes->src->source, node) {
1375 		struct disasm_line *dl;
1376 		struct annotation_line *target;
1377 
1378 		dl = disasm_line(al);
1379 
1380 		if (!disasm_line__is_valid_local_jump(dl, sym))
1381 			continue;
1382 
1383 		target = annotated_source__get_line(notes->src,
1384 						    dl->ops.target.offset);
1385 		/*
1386 		 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
1387 		 * have to adjust to the previous offset?
1388 		 */
1389 		if (target == NULL)
1390 			continue;
1391 
1392 		if (++target->jump_sources > notes->src->max_jump_sources)
1393 			notes->src->max_jump_sources = target->jump_sources;
1394 	}
1395 }
1396 
1397 static void annotation__set_index(struct annotation *notes)
1398 {
1399 	struct annotation_line *al;
1400 	struct annotated_source *src = notes->src;
1401 
1402 	src->widths.max_line_len = 0;
1403 	src->nr_entries = 0;
1404 	src->nr_asm_entries = 0;
1405 
1406 	list_for_each_entry(al, &src->source, node) {
1407 		size_t line_len = strlen(al->line);
1408 
1409 		if (src->widths.max_line_len < line_len)
1410 			src->widths.max_line_len = line_len;
1411 		al->idx = src->nr_entries++;
1412 		if (al->offset != -1)
1413 			al->idx_asm = src->nr_asm_entries++;
1414 		else
1415 			al->idx_asm = -1;
1416 	}
1417 }
1418 
1419 static inline int width_jumps(int n)
1420 {
1421 	if (n >= 100)
1422 		return 5;
1423 	if (n / 10)
1424 		return 2;
1425 	return 1;
1426 }
1427 
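/*
 * Worked values for the jump-source column width above:
 * width_jumps(7) == 1, width_jumps(42) == 2 and width_jumps(150) == 5.
 */
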
1428 static int annotation__max_ins_name(struct annotation *notes)
1429 {
1430 	int max_name = 0, len;
1431 	struct annotation_line *al;
1432 
1433 	list_for_each_entry(al, &notes->src->source, node) {
1434 		if (al->offset == -1)
1435 			continue;
1436 
1437 		len = strlen(disasm_line(al)->ins.name);
1438 		if (max_name < len)
1439 			max_name = len;
1440 	}
1441 
1442 	return max_name;
1443 }
1444 
1445 static void
1446 annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
1447 {
1448 	notes->src->widths.addr = notes->src->widths.target =
1449 		notes->src->widths.min_addr = hex_width(symbol__size(sym));
1450 	notes->src->widths.max_addr = hex_width(sym->end);
1451 	notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources);
1452 	notes->src->widths.max_ins_name = annotation__max_ins_name(notes);
1453 }
1454 
1455 void annotation__update_column_widths(struct annotation *notes)
1456 {
1457 	if (annotate_opts.use_offset)
1458 		notes->src->widths.target = notes->src->widths.min_addr;
1459 	else if (annotate_opts.full_addr)
1460 		notes->src->widths.target = BITS_PER_LONG / 4;
1461 	else
1462 		notes->src->widths.target = notes->src->widths.max_addr;
1463 
1464 	notes->src->widths.addr = notes->src->widths.target;
1465 
1466 	if (annotate_opts.show_nr_jumps)
1467 		notes->src->widths.addr += notes->src->widths.jumps + 1;
1468 }
1469 
1470 void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
1471 {
1472 	annotate_opts.full_addr = !annotate_opts.full_addr;
1473 
1474 	if (annotate_opts.full_addr)
1475 		notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1476 	else
1477 		notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1478 
1479 	annotation__update_column_widths(notes);
1480 }
1481 
1482 static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
1483 				   struct rb_root *root)
1484 {
1485 	struct annotation_line *al;
1486 	struct rb_root tmp_root = RB_ROOT;
1487 
1488 	list_for_each_entry(al, &notes->src->source, node) {
1489 		double percent_max = 0.0;
1490 		u64 addr;
1491 		int i;
1492 
1493 		for (i = 0; i < al->data_nr; i++) {
1494 			double percent;
1495 
1496 			percent = annotation_data__percent(&al->data[i],
1497 							   annotate_opts.percent_type);
1498 
1499 			if (percent > percent_max)
1500 				percent_max = percent;
1501 		}
1502 
1503 		if (percent_max <= 0.5)
1504 			continue;
1505 
1506 		addr = map__rip_2objdump(ms->map, ms->sym->start);
1507 		al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
1508 				       false, true, ms->sym->start + al->offset);
1509 		insert_source_line(&tmp_root, al);
1510 	}
1511 
1512 	resort_source_line(root, &tmp_root);
1513 }
1514 
1515 static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
1516 {
1517 	struct annotation *notes = symbol__annotation(ms->sym);
1518 
1519 	annotation__calc_lines(notes, ms, root);
1520 }
1521 
1522 int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
1523 {
1524 	struct dso *dso = map__dso(ms->map);
1525 	struct symbol *sym = ms->sym;
1526 	struct rb_root source_line = RB_ROOT;
1527 	struct hists *hists = evsel__hists(evsel);
1528 	char buf[1024];
1529 	int err;
1530 
1531 	err = symbol__annotate2(ms, evsel, NULL);
1532 	if (err) {
1533 		char msg[BUFSIZ];
1534 
1535 		dso__set_annotate_warned(dso);
1536 		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1537 		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1538 		return -1;
1539 	}
1540 
1541 	if (annotate_opts.print_lines) {
1542 		srcline_full_filename = annotate_opts.full_path;
1543 		symbol__calc_lines(ms, &source_line);
1544 		print_summary(&source_line, dso__long_name(dso));
1545 	}
1546 
1547 	hists__scnprintf_title(hists, buf, sizeof(buf));
1548 	fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
1549 		buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso));
1550 	symbol__annotate_fprintf2(sym, stdout);
1551 
1552 	annotated_source__purge(symbol__annotation(sym)->src);
1553 
1554 	return 0;
1555 }
1556 
1557 int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
1558 {
1559 	struct dso *dso = map__dso(ms->map);
1560 	struct symbol *sym = ms->sym;
1561 	struct rb_root source_line = RB_ROOT;
1562 	int err;
1563 
1564 	err = symbol__annotate(ms, evsel, NULL);
1565 	if (err) {
1566 		char msg[BUFSIZ];
1567 
1568 		dso__set_annotate_warned(dso);
1569 		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1570 		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1571 		return -1;
1572 	}
1573 
1574 	symbol__calc_percent(sym, evsel);
1575 
1576 	if (annotate_opts.print_lines) {
1577 		srcline_full_filename = annotate_opts.full_path;
1578 		symbol__calc_lines(ms, &source_line);
1579 		print_summary(&source_line, dso__long_name(dso));
1580 	}
1581 
1582 	symbol__annotate_printf(ms, evsel);
1583 
1584 	annotated_source__purge(symbol__annotation(sym)->src);
1585 
1586 	return 0;
1587 }
1588 
1589 bool ui__has_annotation(void)
1590 {
1591 	return use_browser == 1 && perf_hpp_list.sym;
1592 }
1593 
1594 
1595 static double annotation_line__max_percent(struct annotation_line *al,
1596 					   struct annotation *notes,
1597 					   unsigned int percent_type)
1598 {
1599 	double percent_max = 0.0;
1600 	int i;
1601 
1602 	for (i = 0; i < notes->src->nr_events; i++) {
1603 		double percent;
1604 
1605 		percent = annotation_data__percent(&al->data[i],
1606 						   percent_type);
1607 
1608 		if (percent > percent_max)
1609 			percent_max = percent;
1610 	}
1611 
1612 	return percent_max;
1613 }
1614 
1615 static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
1616 			       void *obj, char *bf, size_t size,
1617 			       void (*obj__printf)(void *obj, const char *fmt, ...),
1618 			       void (*obj__write_graph)(void *obj, int graph))
1619 {
1620 	if (dl->ins.ops && dl->ins.ops->scnprintf) {
1621 		if (ins__is_jump(&dl->ins)) {
1622 			bool fwd;
1623 
1624 			if (dl->ops.target.outside)
1625 				goto call_like;
1626 			fwd = dl->ops.target.offset > dl->al.offset;
1627 			obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
1628 			obj__printf(obj, " ");
1629 		} else if (ins__is_call(&dl->ins)) {
1630 call_like:
1631 			obj__write_graph(obj, RARROW_CHAR);
1632 			obj__printf(obj, " ");
1633 		} else if (ins__is_ret(&dl->ins)) {
1634 			obj__write_graph(obj, LARROW_CHAR);
1635 			obj__printf(obj, " ");
1636 		} else {
1637 			obj__printf(obj, "  ");
1638 		}
1639 	} else {
1640 		obj__printf(obj, "  ");
1641 	}
1642 
1643 	disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
1644 			       notes->src->widths.max_ins_name);
1645 }
1646 
1647 static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
1648 {
1649 	double ipc = 0.0, coverage = 0.0;
1650 	struct annotated_branch *branch = annotation__get_branch(notes);
1651 
1652 	if (branch && branch->hit_cycles)
1653 		ipc = branch->hit_insn / ((double)branch->hit_cycles);
1654 
1655 	if (branch && branch->total_insn) {
1656 		coverage = branch->cover_insn * 100.0 /
1657 			((double)branch->total_insn);
1658 	}
1659 
1660 	scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
1661 		  ipc, coverage);
1662 }
1663 
1664 static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
1665 				     bool first_line, bool current_entry, bool change_color, int width,
1666 				     void *obj, unsigned int percent_type,
1667 				     int  (*obj__set_color)(void *obj, int color),
1668 				     void (*obj__set_percent_color)(void *obj, double percent, bool current),
1669 				     int  (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
1670 				     void (*obj__printf)(void *obj, const char *fmt, ...),
1671 				     void (*obj__write_graph)(void *obj, int graph))
1672 
1673 {
1674 	double percent_max = annotation_line__max_percent(al, notes, percent_type);
1675 	int pcnt_width = annotation__pcnt_width(notes),
1676 	    cycles_width = annotation__cycles_width(notes);
1677 	bool show_title = false;
1678 	char bf[256];
1679 	int printed;
1680 
1681 	if (first_line && (al->offset == -1 || percent_max == 0.0)) {
1682 		if (notes->branch && al->cycles) {
1683 			if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
1684 				show_title = true;
1685 		} else
1686 			show_title = true;
1687 	}
1688 
1689 	if (al->offset != -1 && percent_max != 0.0) {
1690 		int i;
1691 
1692 		for (i = 0; i < notes->src->nr_events; i++) {
1693 			double percent;
1694 
1695 			percent = annotation_data__percent(&al->data[i], percent_type);
1696 
1697 			obj__set_percent_color(obj, percent, current_entry);
1698 			if (symbol_conf.show_total_period) {
1699 				obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
1700 			} else if (symbol_conf.show_nr_samples) {
1701 				obj__printf(obj, "%6" PRIu64 " ",
1702 						   al->data[i].he.nr_samples);
1703 			} else {
1704 				obj__printf(obj, "%6.2f ", percent);
1705 			}
1706 		}
1707 	} else {
1708 		obj__set_percent_color(obj, 0, current_entry);
1709 
1710 		if (!show_title)
1711 			obj__printf(obj, "%-*s", pcnt_width, " ");
1712 		else {
1713 			obj__printf(obj, "%-*s", pcnt_width,
1714 					   symbol_conf.show_total_period ? "Period" :
1715 					   symbol_conf.show_nr_samples ? "Samples" : "Percent");
1716 		}
1717 	}
1718 
1719 	if (notes->branch) {
1720 		if (al->cycles && al->cycles->ipc)
1721 			obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
1722 		else if (!show_title)
1723 			obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
1724 		else
1725 			obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
1726 
1727 		if (!annotate_opts.show_minmax_cycle) {
1728 			if (al->cycles && al->cycles->avg)
1729 				obj__printf(obj, "%*" PRIu64 " ",
1730 					   ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
1731 			else if (!show_title)
1732 				obj__printf(obj, "%*s",
1733 					    ANNOTATION__CYCLES_WIDTH, " ");
1734 			else
1735 				obj__printf(obj, "%*s ",
1736 					    ANNOTATION__CYCLES_WIDTH - 1,
1737 					    "Cycle");
1738 		} else {
1739 			if (al->cycles) {
1740 				char str[32];
1741 
1742 				scnprintf(str, sizeof(str),
1743 					"%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
1744 					al->cycles->avg, al->cycles->min,
1745 					al->cycles->max);
1746 
1747 				obj__printf(obj, "%*s ",
1748 					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
1749 					    str);
1750 			} else if (!show_title)
1751 				obj__printf(obj, "%*s",
1752 					    ANNOTATION__MINMAX_CYCLES_WIDTH,
1753 					    " ");
1754 			else
1755 				obj__printf(obj, "%*s ",
1756 					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
1757 					    "Cycle(min/max)");
1758 		}
1759 
1760 		if (show_title && !*al->line) {
1761 			ipc_coverage_string(bf, sizeof(bf), notes);
1762 			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
1763 		}
1764 	}
1765 
1766 	obj__printf(obj, " ");
1767 
1768 	if (!*al->line)
1769 		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
1770 	else if (al->offset == -1) {
1771 		if (al->line_nr && annotate_opts.show_linenr)
1772 			printed = scnprintf(bf, sizeof(bf), "%-*d ",
1773 					    notes->src->widths.addr + 1, al->line_nr);
1774 		else
1775 			printed = scnprintf(bf, sizeof(bf), "%-*s  ",
1776 					    notes->src->widths.addr, " ");
1777 		obj__printf(obj, bf);
1778 		obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
1779 	} else {
1780 		u64 addr = al->offset;
1781 		int color = -1;
1782 
1783 		if (!annotate_opts.use_offset)
1784 			addr += notes->src->start;
1785 
1786 		if (!annotate_opts.use_offset) {
1787 			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
1788 		} else {
1789 			if (al->jump_sources &&
1790 			    annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
1791 				if (annotate_opts.show_nr_jumps) {
1792 					int prev;
1793 					printed = scnprintf(bf, sizeof(bf), "%*d ",
1794 							    notes->src->widths.jumps,
1795 							    al->jump_sources);
1796 					prev = obj__set_jumps_percent_color(obj, al->jump_sources,
1797 									    current_entry);
1798 					obj__printf(obj, bf);
1799 					obj__set_color(obj, prev);
1800 				}
1801 print_addr:
1802 				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
1803 						    notes->src->widths.target, addr);
1804 			} else if (ins__is_call(&disasm_line(al)->ins) &&
1805 				   annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
1806 				goto print_addr;
1807 			} else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
1808 				goto print_addr;
1809 			} else {
1810 				printed = scnprintf(bf, sizeof(bf), "%-*s  ",
1811 						    notes->src->widths.addr, " ");
1812 			}
1813 		}
1814 
1815 		if (change_color)
1816 			color = obj__set_color(obj, HE_COLORSET_ADDR);
1817 		obj__printf(obj, bf);
1818 		if (change_color)
1819 			obj__set_color(obj, color);
1820 
1821 		disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);
1822 
1823 		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
1824 	}
1825 
1826 }
1827 
1828 void annotation_line__write(struct annotation_line *al, struct annotation *notes,
1829 			    struct annotation_write_ops *wops)
1830 {
1831 	__annotation_line__write(al, notes, wops->first_line, wops->current_entry,
1832 				 wops->change_color, wops->width, wops->obj,
1833 				 annotate_opts.percent_type,
1834 				 wops->set_color, wops->set_percent_color,
1835 				 wops->set_jumps_percent_color, wops->printf,
1836 				 wops->write_graph);
1837 }
1838 
1839 int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
1840 		      struct arch **parch)
1841 {
1842 	struct symbol *sym = ms->sym;
1843 	struct annotation *notes = symbol__annotation(sym);
1844 	size_t size = symbol__size(sym);
1845 	int nr_pcnt = 1, err;
1846 
1847 	if (evsel__is_group_event(evsel))
1848 		nr_pcnt = evsel->core.nr_members;
1849 
1850 	err = symbol__annotate(ms, evsel, parch);
1851 	if (err)
1852 		return err;
1853 
1854 	symbol__calc_percent(sym, evsel);
1855 
1856 	annotation__set_index(notes);
1857 	annotation__mark_jump_targets(notes, sym);
1858 
1859 	err = annotation__compute_ipc(notes, size);
1860 	if (err)
1861 		return err;
1862 
1863 	annotation__init_column_widths(notes, sym);
1864 	notes->src->nr_events = nr_pcnt;
1865 
1866 	annotation__update_column_widths(notes);
1867 	sym->annotate2 = 1;
1868 
1869 	return 0;
1870 }
1871 
1872 static int annotation__config(const char *var, const char *value, void *data)
1873 {
1874 	struct annotation_options *opt = data;
1875 
1876 	if (!strstarts(var, "annotate."))
1877 		return 0;
1878 
1879 	if (!strcmp(var, "annotate.offset_level")) {
1880 		perf_config_u8(&opt->offset_level, "offset_level", value);
1881 
1882 		if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
1883 			opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
1884 		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
1885 			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
1886 	} else if (!strcmp(var, "annotate.hide_src_code")) {
1887 		opt->hide_src_code = perf_config_bool("hide_src_code", value);
1888 	} else if (!strcmp(var, "annotate.jump_arrows")) {
1889 		opt->jump_arrows = perf_config_bool("jump_arrows", value);
1890 	} else if (!strcmp(var, "annotate.show_linenr")) {
1891 		opt->show_linenr = perf_config_bool("show_linenr", value);
1892 	} else if (!strcmp(var, "annotate.show_nr_jumps")) {
1893 		opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
1894 	} else if (!strcmp(var, "annotate.show_nr_samples")) {
1895 		symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
1896 								value);
1897 	} else if (!strcmp(var, "annotate.show_total_period")) {
1898 		symbol_conf.show_total_period = perf_config_bool("show_total_period",
1899 								value);
1900 	} else if (!strcmp(var, "annotate.use_offset")) {
1901 		opt->use_offset = perf_config_bool("use_offset", value);
1902 	} else if (!strcmp(var, "annotate.disassembler_style")) {
1903 		opt->disassembler_style = strdup(value);
1904 		if (!opt->disassembler_style) {
1905 			pr_err("Not enough memory for annotate.disassembler_style\n");
1906 			return -1;
1907 		}
1908 	} else if (!strcmp(var, "annotate.objdump")) {
1909 		opt->objdump_path = strdup(value);
1910 		if (!opt->objdump_path) {
1911 			pr_err("Not enough memory for annotate.objdump\n");
1912 			return -1;
1913 		}
1914 	} else if (!strcmp(var, "annotate.addr2line")) {
1915 		symbol_conf.addr2line_path = strdup(value);
1916 		if (!symbol_conf.addr2line_path) {
1917 			pr_err("Not enough memory for annotate.addr2line\n");
1918 			return -1;
1919 		}
1920 	} else if (!strcmp(var, "annotate.demangle")) {
1921 		symbol_conf.demangle = perf_config_bool("demangle", value);
1922 	} else if (!strcmp(var, "annotate.demangle_kernel")) {
1923 		symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
1924 	} else {
1925 		pr_debug("%s variable unknown, ignoring...", var);
1926 	}
1927 
1928 	return 0;
1929 }
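
/*
 * For reference, a hedged example of a ~/.perfconfig snippet handled by
 * annotation__config() above (the keys mirror the "annotate." variables
 * checked there; the values are illustrative only):
 *
 *	[annotate]
 *		hide_src_code = false
 *		use_offset = true
 *		jump_arrows = true
 *		show_nr_jumps = false
 *		offset_level = 1
 */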
1930 
1931 void annotation_options__init(void)
1932 {
1933 	struct annotation_options *opt = &annotate_opts;
1934 
1935 	memset(opt, 0, sizeof(*opt));
1936 
1937 	/* Default values. */
1938 	opt->use_offset = true;
1939 	opt->jump_arrows = true;
1940 	opt->annotate_src = true;
1941 	opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
1942 	opt->percent_type = PERCENT_PERIOD_LOCAL;
1943 }
1944 
1945 void annotation_options__exit(void)
1946 {
1947 	zfree(&annotate_opts.disassembler_style);
1948 	zfree(&annotate_opts.objdump_path);
1949 }
1950 
1951 void annotation_config__init(void)
1952 {
1953 	perf_config(annotation__config, &annotate_opts);
1954 }
1955 
1956 static unsigned int parse_percent_type(char *str1, char *str2)
1957 {
1958 	unsigned int type = (unsigned int) -1;
1959 
1960 	if (!strcmp("period", str1)) {
1961 		if (!strcmp("local", str2))
1962 			type = PERCENT_PERIOD_LOCAL;
1963 		else if (!strcmp("global", str2))
1964 			type = PERCENT_PERIOD_GLOBAL;
1965 	}
1966 
1967 	if (!strcmp("hits", str1)) {
1968 		if (!strcmp("local", str2))
1969 			type = PERCENT_HITS_LOCAL;
1970 		else if (!strcmp("global", str2))
1971 			type = PERCENT_HITS_GLOBAL;
1972 	}
1973 
1974 	return type;
1975 }
1976 
1977 int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
1978 				int unset __maybe_unused)
1979 {
1980 	unsigned int type;
1981 	char *str1, *str2;
1982 	int err = -1;
1983 
1984 	str1 = strdup(_str);
1985 	if (!str1)
1986 		return -ENOMEM;
1987 
1988 	str2 = strchr(str1, '-');
1989 	if (!str2)
1990 		goto out;
1991 
1992 	*str2++ = 0;
1993 
1994 	type = parse_percent_type(str1, str2);
1995 	if (type == (unsigned int) -1)
1996 		type = parse_percent_type(str2, str1);
1997 	if (type != (unsigned int) -1) {
1998 		annotate_opts.percent_type = type;
1999 		err = 0;
2000 	}
2001 
2002 out:
2003 	free(str1);
2004 	return err;
2005 }
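
/*
 * A hedged usage note: parse_percent_type() accepts the two words in either
 * order around the '-', so (assuming the usual --percent-type option wiring
 * in the annotate/report tools) all of these select a valid type:
 *
 *	local-period, period-local, global-period,
 *	local-hits, hits-global, ...
 */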
2006 
2007 int annotate_check_args(void)
2008 {
2009 	struct annotation_options *args = &annotate_opts;
2010 
2011 	if (args->prefix_strip && !args->prefix) {
2012 		pr_err("--prefix-strip requires --prefix\n");
2013 		return -1;
2014 	}
2015 	return 0;
2016 }
2017 
2018 /*
2019  * Get register number and access offset from the given instruction.
2020  * It assumes the AT&T x86 asm format like OFFSET(REG).  The parsing may
2021  * need to be revisited when it handles other architectures.
2022  * Fills the register(s) and offset in @op_loc and returns 0 on success.
2023  */
2024 static int extract_reg_offset(struct arch *arch, const char *str,
2025 			      struct annotated_op_loc *op_loc)
2026 {
2027 	char *p;
2028 	char *regname;
2029 
2030 	if (arch->objdump.register_char == 0)
2031 		return -1;
2032 
2033 	/*
2034 	 * It should start with the offset, but the 0 can be omitted in
2035 	 * the asm, so 0(%rax) is the same as (%rax).
2036 	 *
2037 	 * However, it can also start with a segment selector register like
2038 	 * %gs:0x18(%rbx).  In that case, it should skip that part.
2039 	 */
2040 	if (*str == arch->objdump.register_char) {
2041 		if (arch__is(arch, "x86")) {
2042 			/* FIXME: Handle other segment registers */
2043 			if (!strncmp(str, "%gs:", 4))
2044 				op_loc->segment = INSN_SEG_X86_GS;
2045 		}
2046 
2047 		while (*str && !isdigit(*str) &&
2048 		       *str != arch->objdump.memory_ref_char)
2049 			str++;
2050 	}
2051 
2052 	op_loc->offset = strtol(str, &p, 0);
2053 
2054 	p = strchr(p, arch->objdump.register_char);
2055 	if (p == NULL)
2056 		return -1;
2057 
2058 	regname = strdup(p);
2059 	if (regname == NULL)
2060 		return -1;
2061 
2062 	op_loc->reg1 = get_dwarf_regnum(regname, 0);
2063 	free(regname);
2064 
2065 	/* Get the second register */
2066 	if (op_loc->multi_regs) {
2067 		p = strchr(p + 1, arch->objdump.register_char);
2068 		if (p == NULL)
2069 			return -1;
2070 
2071 		regname = strdup(p);
2072 		if (regname == NULL)
2073 			return -1;
2074 
2075 		op_loc->reg2 = get_dwarf_regnum(regname, 0);
2076 		free(regname);
2077 	}
2078 	return 0;
2079 }
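
/*
 * A hedged sketch of what extract_reg_offset() produces for typical AT&T
 * style x86 operands (DWARF register numbers shown symbolically):
 *
 *   "0x18(%rax)"       -> offset = 0x18, reg1 = rax
 *   "(%rax)"           -> offset = 0,    reg1 = rax
 *   "%gs:0x18(%rbx)"   -> segment = INSN_SEG_X86_GS, offset = 0x18, reg1 = rbx
 *   "0x8(%rbx,%rcx,4)" -> offset = 0x8,  reg1 = rbx, reg2 = rcx (multi_regs)
 */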
2080 
2081 /**
2082  * annotate_get_insn_location - Get location of instruction
2083  * @arch: the architecture info
2084  * @dl: the target instruction
2085  * @loc: a buffer to save the data
2086  *
2087  * Get detailed location info (register and offset) for the instruction.
2088  * It fills both the source and target operands and records whether each
2089  * accesses a memory location.  The offset field is meaningful only when
2090  * the corresponding mem_ref flag is set.  The reg2 field is meaningful
2091  * only when the multi_regs flag is set.
2092  *
2093  * Some examples on x86:
2094  *
2095  *   mov  (%rax), %rcx   # src_reg1 = rax, src_mem = 1, src_offset = 0
2096  *                       # dst_reg1 = rcx, dst_mem = 0
2097  *
2098  *   mov  0x18, %r8      # src_reg1 = -1, src_mem = 0
2099  *                       # dst_reg1 = r8, dst_mem = 0
2100  *
2101  *   mov  %rsi, 8(%rbx,%rcx,4)  # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
2102  *                              # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
2103  *                              # dst_multi_regs = 1, dst_offset = 8
2104  */
2105 int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
2106 			       struct annotated_insn_loc *loc)
2107 {
2108 	struct ins_operands *ops;
2109 	struct annotated_op_loc *op_loc;
2110 	int i;
2111 
2112 	if (ins__is_lock(&dl->ins))
2113 		ops = dl->ops.locked.ops;
2114 	else
2115 		ops = &dl->ops;
2116 
2117 	if (ops == NULL)
2118 		return -1;
2119 
2120 	memset(loc, 0, sizeof(*loc));
2121 
2122 	for_each_insn_op_loc(loc, i, op_loc) {
2123 		const char *insn_str = ops->source.raw;
2124 		bool multi_regs = ops->source.multi_regs;
2125 
2126 		if (i == INSN_OP_TARGET) {
2127 			insn_str = ops->target.raw;
2128 			multi_regs = ops->target.multi_regs;
2129 		}
2130 
2131 		/* Invalidate the registers by default */
2132 		op_loc->reg1 = -1;
2133 		op_loc->reg2 = -1;
2134 
2135 		if (insn_str == NULL)
2136 			continue;
2137 
2138 		if (strchr(insn_str, arch->objdump.memory_ref_char)) {
2139 			op_loc->mem_ref = true;
2140 			op_loc->multi_regs = multi_regs;
2141 			extract_reg_offset(arch, insn_str, op_loc);
2142 		} else {
2143 			char *s, *p = NULL;
2144 
2145 			if (arch__is(arch, "x86")) {
2146 				/* FIXME: Handle other segment registers */
2147 				if (!strncmp(insn_str, "%gs:", 4)) {
2148 					op_loc->segment = INSN_SEG_X86_GS;
2149 					op_loc->offset = strtol(insn_str + 4,
2150 								&p, 0);
2151 					if (p && p != insn_str + 4)
2152 						op_loc->imm = true;
2153 					continue;
2154 				}
2155 			}
2156 
2157 			s = strdup(insn_str);
2158 			if (s == NULL)
2159 				return -1;
2160 
2161 			if (*s == arch->objdump.register_char)
2162 				op_loc->reg1 = get_dwarf_regnum(s, 0);
2163 			else if (*s == arch->objdump.imm_char) {
2164 				op_loc->offset = strtol(s + 1, &p, 0);
2165 				if (p && p != s + 1)
2166 					op_loc->imm = true;
2167 			}
2168 			free(s);
2169 		}
2170 	}
2171 
2172 	return 0;
2173 }
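
/*
 * A minimal caller sketch, mirroring how hist_entry__get_data_type() below
 * consumes the result:
 *
 *	struct annotated_insn_loc loc;
 *	struct annotated_op_loc *op_loc;
 *	int i;
 *
 *	if (annotate_get_insn_location(arch, dl, &loc) == 0) {
 *		for_each_insn_op_loc(&loc, i, op_loc) {
 *			if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
 *				continue;
 *			// use op_loc->reg1/reg2 and op_loc->offset here
 *		}
 *	}
 */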
2174 
2175 static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
2176 					    bool allow_update)
2177 {
2178 	struct disasm_line *dl;
2179 	struct annotation *notes;
2180 
2181 	notes = symbol__annotation(sym);
2182 
2183 	list_for_each_entry(dl, &notes->src->source, al.node) {
2184 		if (dl->al.offset == -1)
2185 			continue;
2186 
2187 		if (sym->start + dl->al.offset == ip) {
2188 			/*
2189 			 * llvm-objdump places "lock" on a separate line and
2190 			 * in that case, we want to get the next line.
2191 			 */
2192 			if (ins__is_lock(&dl->ins) &&
2193 			    *dl->ops.raw == '\0' && allow_update) {
2194 				ip++;
2195 				continue;
2196 			}
2197 			return dl;
2198 		}
2199 	}
2200 	return NULL;
2201 }
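
/*
 * A hedged example of the llvm-objdump layout handled above, where the
 * "lock" prefix gets a line of its own; with allow_update the lookup bumps
 * @ip and returns the following line instead:
 *
 *	1234:	lock
 *	1235:	incl   (%rax)
 */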
2202 
2203 static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
2204 						      const char *name)
2205 {
2206 	struct annotated_item_stat *istat;
2207 
2208 	list_for_each_entry(istat, head, list) {
2209 		if (!strcmp(istat->name, name))
2210 			return istat;
2211 	}
2212 
2213 	istat = zalloc(sizeof(*istat));
2214 	if (istat == NULL)
2215 		return NULL;
2216 
2217 	istat->name = strdup(name);
2218 	if (istat->name == NULL) {
2219 		free(istat);
2220 		return NULL;
2221 	}
2222 
2223 	list_add_tail(&istat->list, head);
2224 	return istat;
2225 }
2226 
2227 static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
2228 {
2229 	if (arch__is(arch, "x86")) {
2230 		if (!strncmp(dl->ins.name, "push", 4) ||
2231 		    !strncmp(dl->ins.name, "pop", 3) ||
2232 		    !strncmp(dl->ins.name, "ret", 3))
2233 			return true;
2234 	}
2235 
2236 	return false;
2237 }
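
/*
 * Matched x86 examples (prefix match on the mnemonic): "push %rbp",
 * "pop %rbx", "ret"/"retq".  Their implicit stack access is reported with
 * the stackop_type pseudo data type (see hist_entry__get_data_type()).
 */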
2238 
2239 static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
2240 {
2241 	/* On x86_64, %gs:40 is used for the stack canary */
2242 	if (arch__is(arch, "x86")) {
2243 		if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
2244 		    loc->offset == 40)
2245 			return true;
2246 	}
2247 
2248 	return false;
2249 }
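
/*
 * A hedged example of an access that matches on x86_64, where the stack
 * protector canary lives at %gs:0x28 (40 decimal):
 *
 *	mov    %gs:0x28,%rax
 *	cmp    %rax,-0x8(%rbp)
 */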
2250 
2251 static struct disasm_line *
2252 annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
2253 {
2254 	struct list_head *sources = &notes->src->source;
2255 	struct disasm_line *prev;
2256 
2257 	if (curr == list_first_entry(sources, struct disasm_line, al.node))
2258 		return NULL;
2259 
2260 	prev = list_prev_entry(curr, al.node);
2261 	while (prev->al.offset == -1 &&
2262 	       prev != list_first_entry(sources, struct disasm_line, al.node))
2263 		prev = list_prev_entry(prev, al.node);
2264 
2265 	if (prev->al.offset == -1)
2266 		return NULL;
2267 
2268 	return prev;
2269 }
2270 
2271 static struct disasm_line *
2272 annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
2273 {
2274 	struct list_head *sources = &notes->src->source;
2275 	struct disasm_line *next;
2276 
2277 	if (curr == list_last_entry(sources, struct disasm_line, al.node))
2278 		return NULL;
2279 
2280 	next = list_next_entry(curr, al.node);
2281 	while (next->al.offset == -1 &&
2282 	       next != list_last_entry(sources, struct disasm_line, al.node))
2283 		next = list_next_entry(next, al.node);
2284 
2285 	if (next->al.offset == -1)
2286 		return NULL;
2287 
2288 	return next;
2289 }
2290 
2291 u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
2292 			struct disasm_line *dl)
2293 {
2294 	struct annotation *notes;
2295 	struct disasm_line *next;
2296 	u64 addr;
2297 
2298 	notes = symbol__annotation(ms->sym);
2299 	/*
2300 	 * PC-relative addressing starts from the next instruction address,
2301 	 * but the IP is for the current instruction.  Since disasm_line
2302 	 * doesn't have the instruction size, calculate it using the next
2303 	 * disasm_line.  If it's the last one, use the symbol's end
2304 	 * address directly.
2305 	 */
2306 	next = annotation__next_asm_line(notes, dl);
2307 	if (next == NULL)
2308 		addr = ms->sym->end + offset;
2309 	else
2310 		addr = ip + (next->al.offset - dl->al.offset) + offset;
2311 
2312 	return map__rip_2objdump(ms->map, addr);
2313 }
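
/*
 * A worked example (a sketch):
 *
 *	10:	lea    0x1234(%rip),%rax	<- dl,   al.offset = 0x10
 *	17:	mov    %rax,%rdi		<- next, al.offset = 0x17
 *
 * The instruction size is 0x17 - 0x10 = 7, so the referenced address is
 * ip + 7 + 0x1234, which is then converted with map__rip_2objdump().
 */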
2314 
2315 /**
2316  * hist_entry__get_data_type - find data type for given hist entry
2317  * @he: hist entry
2318  *
2319  * This function first annotates the instruction at @he->ip and extracts
2320  * register and offset info from it.  Then it searches the DWARF debug
2321  * info to find the variable and its type information using the address,
2322  * register, and offset.
2323  */
2324 struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
2325 {
2326 	struct map_symbol *ms = &he->ms;
2327 	struct evsel *evsel = hists_to_evsel(he->hists);
2328 	struct arch *arch;
2329 	struct disasm_line *dl;
2330 	struct annotated_insn_loc loc;
2331 	struct annotated_op_loc *op_loc;
2332 	struct annotated_data_type *mem_type;
2333 	struct annotated_item_stat *istat;
2334 	u64 ip = he->ip;
2335 	int i;
2336 
2337 	ann_data_stat.total++;
2338 
2339 	if (ms->map == NULL || ms->sym == NULL) {
2340 		ann_data_stat.no_sym++;
2341 		return NULL;
2342 	}
2343 
2344 	if (!symbol_conf.init_annotation) {
2345 		ann_data_stat.no_sym++;
2346 		return NULL;
2347 	}
2348 
2349 	/* Make sure it has the disasm of the function */
2350 	if (symbol__annotate(ms, evsel, &arch) < 0) {
2351 		ann_data_stat.no_insn++;
2352 		return NULL;
2353 	}
2354 
2355 	/*
2356 	 * Get a disasm line to extract the location from the insn.
2357 	 * This is too slow...
2358 	 */
2359 	dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
2360 	if (dl == NULL) {
2361 		ann_data_stat.no_insn++;
2362 		return NULL;
2363 	}
2364 
2365 retry:
2366 	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
2367 	if (istat == NULL) {
2368 		ann_data_stat.no_insn++;
2369 		return NULL;
2370 	}
2371 
2372 	if (annotate_get_insn_location(arch, dl, &loc) < 0) {
2373 		ann_data_stat.no_insn_ops++;
2374 		istat->bad++;
2375 		return NULL;
2376 	}
2377 
2378 	if (is_stack_operation(arch, dl)) {
2379 		istat->good++;
2380 		he->mem_type_off = 0;
2381 		return &stackop_type;
2382 	}
2383 
2384 	for_each_insn_op_loc(&loc, i, op_loc) {
2385 		struct data_loc_info dloc = {
2386 			.arch = arch,
2387 			.thread = he->thread,
2388 			.ms = ms,
2389 			/* Recalculate IP for LOCK prefix or insn fusion */
2390 			.ip = ms->sym->start + dl->al.offset,
2391 			.cpumode = he->cpumode,
2392 			.op = op_loc,
2393 		};
2394 
2395 		if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
2396 			continue;
2397 
2398 		/* Recalculate IP because of LOCK prefix or insn fusion */
2399 		ip = ms->sym->start + dl->al.offset;
2400 
2401 		/* PC-relative addressing */
2402 		if (op_loc->reg1 == DWARF_REG_PC) {
2403 			dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
2404 							    op_loc->offset, dl);
2405 		}
2406 
2407 		/* A this-cpu (%gs-based per-CPU) access in the kernel - pretend PC-relative addressing */
2408 		if (dso__kernel(map__dso(ms->map)) && arch__is(arch, "x86") &&
2409 		    op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
2410 			dloc.var_addr = op_loc->offset;
2411 			op_loc->reg1 = DWARF_REG_PC;
2412 		}
2413 
2414 		mem_type = find_data_type(&dloc);
2415 
2416 		if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
2417 			istat->good++;
2418 			he->mem_type_off = 0;
2419 			return &canary_type;
2420 		}
2421 
2422 		if (mem_type)
2423 			istat->good++;
2424 		else
2425 			istat->bad++;
2426 
2427 		if (symbol_conf.annotate_data_sample) {
2428 			annotated_data_type__update_samples(mem_type, evsel,
2429 							    dloc.type_offset,
2430 							    he->stat.nr_events,
2431 							    he->stat.period);
2432 		}
2433 		he->mem_type_off = dloc.type_offset;
2434 		return mem_type;
2435 	}
2436 
2437 	/*
2438 	 * Some instructions can be fused and the actual memory access may come
2439 	 * from the previous instruction.
2440 	 */
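	/*
	 * A hedged x86 example (cmp/jcc macro-fusion): the sampled IP may
	 * point at the branch while the memory access belongs to the
	 * previous instruction, hence the retry below:
	 *
	 *	cmpl   $0x0,0x10(%rdi)		<- memory access is here
	 *	je     <target>			<- but the sample IP is here
	 */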
2441 	if (dl->al.offset > 0) {
2442 		struct annotation *notes;
2443 		struct disasm_line *prev_dl;
2444 
2445 		notes = symbol__annotation(ms->sym);
2446 		prev_dl = annotation__prev_asm_line(notes, dl);
2447 
2448 		if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
2449 			dl = prev_dl;
2450 			goto retry;
2451 		}
2452 	}
2453 
2454 	ann_data_stat.no_mem_ops++;
2455 	istat->bad++;
2456 	return NULL;
2457 }
2458 
2459 /* Basic block traversal (BFS) data structure */
2460 struct basic_block_data {
2461 	struct list_head queue;
2462 	struct list_head visited;
2463 };
2464 
2465 /*
2466  * During the traversal, it needs to know the parent block from which the
2467  * current block was reached.  Note that a single basic block can be the
2468  * parent of two child basic blocks (in case of a conditional jump).
2469  */
2470 struct basic_block_link {
2471 	struct list_head node;
2472 	struct basic_block_link *parent;
2473 	struct annotated_basic_block *bb;
2474 };
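
/*
 * A small sketch of the parent links: a conditional jump at the end of
 * block A enqueues two children, both pointing back to A's link:
 *
 *	A (ends with a conditional jump)
 *	|- B (jump target)   : link->parent == A's link
 *	`- C (fall-through)  : link->parent == A's link
 *
 * link_found_basic_blocks() later walks these parent pointers to build the
 * final list from the start block.
 */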
2475 
2476 /* Check if any basic block in the list already contains the offset */
2477 static bool basic_block_has_offset(struct list_head *head, s64 offset)
2478 {
2479 	struct basic_block_link *link;
2480 
2481 	list_for_each_entry(link, head, node) {
2482 		s64 begin_offset = link->bb->begin->al.offset;
2483 		s64 end_offset = link->bb->end->al.offset;
2484 
2485 		if (begin_offset <= offset && offset <= end_offset)
2486 			return true;
2487 	}
2488 	return false;
2489 }
2490 
2491 static bool is_new_basic_block(struct basic_block_data *bb_data,
2492 			       struct disasm_line *dl)
2493 {
2494 	s64 offset = dl->al.offset;
2495 
2496 	if (basic_block_has_offset(&bb_data->visited, offset))
2497 		return false;
2498 	if (basic_block_has_offset(&bb_data->queue, offset))
2499 		return false;
2500 	return true;
2501 }
2502 
2503 /* Add a basic block starting from dl and link it to the parent */
2504 static int add_basic_block(struct basic_block_data *bb_data,
2505 			   struct basic_block_link *parent,
2506 			   struct disasm_line *dl)
2507 {
2508 	struct annotated_basic_block *bb;
2509 	struct basic_block_link *link;
2510 
2511 	if (dl == NULL)
2512 		return -1;
2513 
2514 	if (!is_new_basic_block(bb_data, dl))
2515 		return 0;
2516 
2517 	bb = zalloc(sizeof(*bb));
2518 	if (bb == NULL)
2519 		return -1;
2520 
2521 	bb->begin = dl;
2522 	bb->end = dl;
2523 	INIT_LIST_HEAD(&bb->list);
2524 
2525 	link = malloc(sizeof(*link));
2526 	if (link == NULL) {
2527 		free(bb);
2528 		return -1;
2529 	}
2530 
2531 	link->bb = bb;
2532 	link->parent = parent;
2533 	list_add_tail(&link->node, &bb_data->queue);
2534 	return 0;
2535 }
2536 
2537 /* Returns true when it finds the target in the current basic block */
2538 static bool process_basic_block(struct basic_block_data *bb_data,
2539 				struct basic_block_link *link,
2540 				struct symbol *sym, u64 target)
2541 {
2542 	struct disasm_line *dl, *next_dl, *last_dl;
2543 	struct annotation *notes = symbol__annotation(sym);
2544 	bool found = false;
2545 
2546 	dl = link->bb->begin;
2547 	/* Check if it's already visited */
2548 	if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
2549 		return false;
2550 
2551 	last_dl = list_last_entry(&notes->src->source,
2552 				  struct disasm_line, al.node);
2553 	if (last_dl->al.offset == -1)
2554 		last_dl = annotation__prev_asm_line(notes, last_dl);
2555 
2556 	if (last_dl == NULL)
2557 		return false;
2558 
2559 	list_for_each_entry_from(dl, &notes->src->source, al.node) {
2560 		/* Skip comment or debug info line */
2561 		if (dl->al.offset == -1)
2562 			continue;
2563 		/* Found the target instruction */
2564 		if (sym->start + dl->al.offset == target) {
2565 			found = true;
2566 			break;
2567 		}
2568 		/* End of the function, finish the block */
2569 		if (dl == last_dl)
2570 			break;
2571 		/* 'return' instruction finishes the block */
2572 		if (ins__is_ret(&dl->ins))
2573 			break;
2574 		/* normal instructions are part of the basic block */
2575 		if (!ins__is_jump(&dl->ins))
2576 			continue;
2577 		/* jump to a different function, tail call or return */
2578 		if (dl->ops.target.outside)
2579 			break;
2580 		/* jump instruction creates new basic block(s) */
2581 		next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
2582 					   /*allow_update=*/false);
2583 		if (next_dl)
2584 			add_basic_block(bb_data, link, next_dl);
2585 
2586 		/*
2587 		 * FIXME: determine conditional jumps properly.
2588 		 * Conditional jumps create another basic block with the
2589 		 * next disasm line.
2590 		 */
2591 		if (!strstr(dl->ins.name, "jmp")) {
2592 			next_dl = annotation__next_asm_line(notes, dl);
2593 			if (next_dl)
2594 				add_basic_block(bb_data, link, next_dl);
2595 		}
2596 		break;
2597 
2598 	}
2599 	link->bb->end = dl;
2600 	return found;
2601 }
2602 
2603 /*
2604  * Once it has found the target basic block, build a proper linked list of
2605  * basic blocks by following the parent links back to the start.
2606  */
2607 static void link_found_basic_blocks(struct basic_block_link *link,
2608 				    struct list_head *head)
2609 {
2610 	while (link) {
2611 		struct basic_block_link *parent = link->parent;
2612 
2613 		list_move(&link->bb->list, head);
2614 		list_del(&link->node);
2615 		free(link);
2616 
2617 		link = parent;
2618 	}
2619 }
2620 
2621 static void delete_basic_blocks(struct basic_block_data *bb_data)
2622 {
2623 	struct basic_block_link *link, *tmp;
2624 
2625 	list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
2626 		list_del(&link->node);
2627 		zfree(&link->bb);
2628 		free(link);
2629 	}
2630 
2631 	list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
2632 		list_del(&link->node);
2633 		zfree(&link->bb);
2634 		free(link);
2635 	}
2636 }
2637 
2638 /**
2639  * annotate_get_basic_blocks - Get basic blocks for given address range
2640  * @sym: symbol to annotate
2641  * @src: source address
2642  * @dst: destination address
2643  * @head: list head to save basic blocks
2644  *
2645  * This function traverses disasm_lines from @src to @dst and saves them in
2646  * a list of annotated_basic_block on @head.  It uses BFS to find the
2647  * shortest path between the two.  The basic_block_link maintains parent
2648  * links so that the list of blocks can be built from the start.
2649  */
2650 int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
2651 			      struct list_head *head)
2652 {
2653 	struct basic_block_data bb_data = {
2654 		.queue = LIST_HEAD_INIT(bb_data.queue),
2655 		.visited = LIST_HEAD_INIT(bb_data.visited),
2656 	};
2657 	struct basic_block_link *link;
2658 	struct disasm_line *dl;
2659 	int ret = -1;
2660 
2661 	dl = find_disasm_line(sym, src, /*allow_update=*/false);
2662 	if (dl == NULL)
2663 		return -1;
2664 
2665 	if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0)
2666 		return -1;
2667 
2668 	/* Find shortest path from src to dst using BFS */
2669 	while (!list_empty(&bb_data.queue)) {
2670 		link = list_first_entry(&bb_data.queue, struct basic_block_link, node);
2671 
2672 		if (process_basic_block(&bb_data, link, sym, dst)) {
2673 			link_found_basic_blocks(link, head);
2674 			ret = 0;
2675 			break;
2676 		}
2677 		list_move(&link->node, &bb_data.visited);
2678 	}
2679 	delete_basic_blocks(&bb_data);
2680 	return ret;
2681 }
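
/*
 * A minimal usage sketch (assuming @sym was annotated beforehand so that
 * its disasm_lines exist; process() is a hypothetical consumer):
 *
 *	LIST_HEAD(blocks);
 *	struct annotated_basic_block *bb;
 *
 *	if (annotate_get_basic_blocks(sym, branch_from, branch_to, &blocks) == 0) {
 *		list_for_each_entry(bb, &blocks, list)
 *			process(bb->begin, bb->end);
 *	}
 */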
2682