xref: /linux/tools/perf/util/annotate.c (revision 173b0b5b0e865348684c02bd9cb1d22b5d46e458)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4  *
5  * Parts came from builtin-annotate.c, see those files for further
6  * copyright notes.
7  */
8 
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <libgen.h>
12 #include <stdlib.h>
13 #include "util.h" // hex_width()
14 #include "ui/ui.h"
15 #include "sort.h"
16 #include "build-id.h"
17 #include "color.h"
18 #include "config.h"
19 #include "disasm.h"
20 #include "dso.h"
21 #include "env.h"
22 #include "map.h"
23 #include "maps.h"
24 #include "symbol.h"
25 #include "srcline.h"
26 #include "units.h"
27 #include "debug.h"
28 #include "annotate.h"
29 #include "annotate-data.h"
30 #include "evsel.h"
31 #include "evlist.h"
32 #include "bpf-event.h"
33 #include "bpf-utils.h"
34 #include "block-range.h"
35 #include "string2.h"
36 #include "dwarf-regs.h"
37 #include "util/event.h"
38 #include "util/sharded_mutex.h"
39 #include "arch/common.h"
40 #include "namespaces.h"
41 #include "thread.h"
42 #include "hashmap.h"
43 #include <regex.h>
44 #include <linux/bitops.h>
45 #include <linux/kernel.h>
46 #include <linux/string.h>
47 #include <linux/zalloc.h>
48 #include <subcmd/parse-options.h>
49 #include <subcmd/run-command.h>
50 
51 /* FIXME: For the HE_COLORSET */
52 #include "ui/browser.h"
53 
54 /*
55  * FIXME: Using the same values as slang.h,
56  * but that header may not be available everywhere
57  */
58 #define LARROW_CHAR	((unsigned char)',')
59 #define RARROW_CHAR	((unsigned char)'+')
60 #define DARROW_CHAR	((unsigned char)'.')
61 #define UARROW_CHAR	((unsigned char)'-')
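/*
 * FILE__write_graph() below renders these as UTF-8 arrows (↓ ↑ ← →)
 * when annotations are written to a plain FILE.
 */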
62 
63 #include <linux/ctype.h>
64 
65 /* global annotation options */
66 struct annotation_options annotate_opts;
67 
68 /* Data type collection debug statistics */
69 struct annotated_data_stat ann_data_stat;
70 LIST_HEAD(ann_insn_stat);
71 
72 /* Pseudo data types */
73 struct annotated_data_type stackop_type = {
74 	.self = {
75 		.type_name = (char *)"(stack operation)",
76 		.children = LIST_HEAD_INIT(stackop_type.self.children),
77 	},
78 };
79 
80 struct annotated_data_type canary_type = {
81 	.self = {
82 		.type_name = (char *)"(stack canary)",
83 		.children = LIST_HEAD_INIT(canary_type.self.children),
84 	},
85 };
86 
87 /* symbol histogram: key = offset << 16 | evsel->core.idx */
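/*
 * e.g. a sample at offset 0x40 for evsel index 2 is keyed as
 * (0x40 << 16) | 2 == 0x400002; the hash just folds the two halves
 * back together (0x40 + 2).
 */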
88 static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
89 {
90 	return (key >> 16) + (key & 0xffff);
91 }
92 
93 static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
94 {
95 	return key1 == key2;
96 }
97 
98 static struct annotated_source *annotated_source__new(void)
99 {
100 	struct annotated_source *src = zalloc(sizeof(*src));
101 
102 	if (src != NULL)
103 		INIT_LIST_HEAD(&src->source);
104 
105 	return src;
106 }
107 
108 static __maybe_unused void annotated_source__delete(struct annotated_source *src)
109 {
110 	if (src == NULL)
111 		return;
112 
113 	hashmap__free(src->samples);
114 	zfree(&src->histograms);
115 	free(src);
116 }
117 
118 static int annotated_source__alloc_histograms(struct annotated_source *src,
119 					      int nr_hists)
120 {
121 	src->nr_histograms   = nr_hists;
122 	src->histograms	     = calloc(nr_hists, sizeof(*src->histograms));
123 
124 	if (src->histograms == NULL)
125 		return -1;
126 
127 	src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
128 	if (src->samples == NULL)
129 		zfree(&src->histograms);
130 
131 	return src->histograms ? 0 : -1;
132 }
133 
134 void symbol__annotate_zero_histograms(struct symbol *sym)
135 {
136 	struct annotation *notes = symbol__annotation(sym);
137 
138 	annotation__lock(notes);
139 	if (notes->src != NULL) {
140 		memset(notes->src->histograms, 0,
141 		       notes->src->nr_histograms * sizeof(*notes->src->histograms));
142 		hashmap__clear(notes->src->samples);
143 	}
144 	if (notes->branch && notes->branch->cycles_hist) {
145 		memset(notes->branch->cycles_hist, 0,
146 		       symbol__size(sym) * sizeof(struct cyc_hist));
147 	}
148 	annotation__unlock(notes);
149 }
150 
151 static int __symbol__account_cycles(struct cyc_hist *ch,
152 				    u64 start,
153 				    unsigned offset, unsigned cycles,
154 				    unsigned have_start)
155 {
156 	/*
157 	 * For now we can only account one basic block per
158 	 * final jump, but multiple blocks could overlap.
159 	 * Always account the longest one, so when a shorter
160 	 * one has already been seen, throw it away.
161 	 *
162 	 * The full cycles are always accounted separately.
163 	 */
164 	ch[offset].num_aggr++;
165 	ch[offset].cycles_aggr += cycles;
166 
167 	if (cycles > ch[offset].cycles_max)
168 		ch[offset].cycles_max = cycles;
169 
170 	if (ch[offset].cycles_min) {
171 		if (cycles && cycles < ch[offset].cycles_min)
172 			ch[offset].cycles_min = cycles;
173 	} else
174 		ch[offset].cycles_min = cycles;
175 
176 	if (!have_start && ch[offset].have_start)
177 		return 0;
178 	if (ch[offset].num) {
179 		if (have_start && (!ch[offset].have_start ||
180 				   ch[offset].start > start)) {
181 			ch[offset].have_start = 0;
182 			ch[offset].cycles = 0;
183 			ch[offset].num = 0;
184 			if (ch[offset].reset < 0xffff)
185 				ch[offset].reset++;
186 		} else if (have_start &&
187 			   ch[offset].start < start)
188 			return 0;
189 	}
190 
191 	if (ch[offset].num < NUM_SPARKS)
192 		ch[offset].cycles_spark[ch[offset].num] = cycles;
193 
194 	ch[offset].have_start = have_start;
195 	ch[offset].start = start;
196 	ch[offset].cycles += cycles;
197 	ch[offset].num++;
198 	return 0;
199 }
200 
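/*
 * Record one sample at @addr: bump the per-event totals in the symbol
 * histogram and the per-(offset, event) entry kept in src->samples,
 * allocating that entry on first use.  Returns -ERANGE if @addr falls
 * outside the symbol.
 */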
201 static int __symbol__inc_addr_samples(struct map_symbol *ms,
202 				      struct annotated_source *src, int evidx, u64 addr,
203 				      struct perf_sample *sample)
204 {
205 	struct symbol *sym = ms->sym;
206 	long hash_key;
207 	u64 offset;
208 	struct sym_hist *h;
209 	struct sym_hist_entry *entry;
210 
211 	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
212 
213 	if ((addr < sym->start || addr >= sym->end) &&
214 	    (addr != sym->end || sym->start != sym->end)) {
215 		pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
216 		       __func__, __LINE__, sym->name, sym->start, addr, sym->end);
217 		return -ERANGE;
218 	}
219 
220 	offset = addr - sym->start;
221 	h = annotated_source__histogram(src, evidx);
222 	if (h == NULL) {
223 		pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
224 			 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
225 		return -ENOMEM;
226 	}
227 
228 	hash_key = offset << 16 | evidx;
229 	if (!hashmap__find(src->samples, hash_key, &entry)) {
230 		entry = zalloc(sizeof(*entry));
231 		if (entry == NULL)
232 			return -ENOMEM;
233 
234 		if (hashmap__add(src->samples, hash_key, entry) < 0)
235 			return -ENOMEM;
236 	}
237 
238 	h->nr_samples++;
239 	h->period += sample->period;
240 	entry->nr_samples++;
241 	entry->period += sample->period;
242 
243 	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
244 		  ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
245 		  sym->start, sym->name, addr, addr - sym->start, evidx,
246 		  entry->nr_samples, entry->period);
247 	return 0;
248 }
249 
250 struct annotated_branch *annotation__get_branch(struct annotation *notes)
251 {
252 	if (notes == NULL)
253 		return NULL;
254 
255 	if (notes->branch == NULL)
256 		notes->branch = zalloc(sizeof(*notes->branch));
257 
258 	return notes->branch;
259 }
260 
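/*
 * Lazily allocate the branch data and its cycles histogram: one
 * struct cyc_hist per byte of the symbol, indexed by instruction
 * offset.
 */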
261 static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
262 {
263 	struct annotation *notes = symbol__annotation(sym);
264 	struct annotated_branch *branch;
265 
266 	branch = annotation__get_branch(notes);
267 	if (branch == NULL)
268 		return NULL;
269 
270 	if (branch->cycles_hist == NULL) {
271 		const size_t size = symbol__size(sym);
272 
273 		branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
274 	}
275 
276 	return branch->cycles_hist;
277 }
278 
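/*
 * Return the annotated source for @sym, creating it and its per-event
 * histograms (one sym_hist per evsel) on first use.
 */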
279 struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
280 {
281 	struct annotation *notes = symbol__annotation(sym);
282 
283 	if (notes->src == NULL) {
284 		notes->src = annotated_source__new();
285 		if (notes->src == NULL)
286 			return NULL;
287 		goto alloc_histograms;
288 	}
289 
290 	if (notes->src->histograms == NULL) {
291 alloc_histograms:
292 		annotated_source__alloc_histograms(notes->src, nr_hists);
293 	}
294 
295 	return notes->src;
296 }
297 
298 static int symbol__inc_addr_samples(struct map_symbol *ms,
299 				    struct evsel *evsel, u64 addr,
300 				    struct perf_sample *sample)
301 {
302 	struct symbol *sym = ms->sym;
303 	struct annotated_source *src;
304 
305 	if (sym == NULL)
306 		return 0;
307 	src = symbol__hists(sym, evsel->evlist->core.nr_entries);
308 	return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
309 }
310 
311 static int symbol__account_cycles(u64 addr, u64 start,
312 				  struct symbol *sym, unsigned cycles)
313 {
314 	struct cyc_hist *cycles_hist;
315 	unsigned offset;
316 
317 	if (sym == NULL)
318 		return 0;
319 	cycles_hist = symbol__cycles_hist(sym);
320 	if (cycles_hist == NULL)
321 		return -ENOMEM;
322 	if (addr < sym->start || addr >= sym->end)
323 		return -ERANGE;
324 
325 	if (start) {
326 		if (start < sym->start || start >= sym->end)
327 			return -ERANGE;
328 		if (start >= addr)
329 			start = 0;
330 	}
331 	offset = addr - sym->start;
332 	return __symbol__account_cycles(cycles_hist,
333 					start ? start - sym->start : 0,
334 					offset, cycles,
335 					!!start);
336 }
337 
338 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
339 				    struct addr_map_symbol *start,
340 				    unsigned cycles)
341 {
342 	u64 saddr = 0;
343 	int err;
344 
345 	if (!cycles)
346 		return 0;
347 
348 	/*
349 	 * Only set start when IPC can be computed. We can only
350 	 * compute it when the basic block is completely in a single
351 	 * function.
352 	 * Special-case the situation where the jump is elsewhere,
353 	 * but it starts at the function start.
354 	 */
355 	if (start &&
356 		(start->ms.sym == ams->ms.sym ||
357 		 (ams->ms.sym &&
358 		  start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
359 		saddr = start->al_addr;
360 	if (saddr == 0)
361 		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
362 			ams->addr,
363 			start ? start->addr : 0,
364 			ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
365 			saddr);
366 	err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles);
367 	if (err)
368 		pr_debug2("account_cycles failed %d\n", err);
369 	return err;
370 }
371 
372 struct annotation_line *annotated_source__get_line(struct annotated_source *src,
373 						   s64 offset)
374 {
375 	struct annotation_line *al;
376 
377 	list_for_each_entry(al, &src->source, node) {
378 		if (al->offset == offset)
379 			return al;
380 	}
381 	return NULL;
382 }
383 
384 static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
385 {
386 	struct annotation_line *al;
387 	unsigned n_insn = 0;
388 
389 	al = annotated_source__get_line(notes->src, start);
390 	if (al == NULL)
391 		return 0;
392 
393 	list_for_each_entry_from(al, &notes->src->source, node) {
394 		if (al->offset == -1)
395 			continue;
396 		if ((u64)al->offset > end)
397 			break;
398 		n_insn++;
399 	}
400 	return n_insn;
401 }
402 
403 static void annotated_branch__delete(struct annotated_branch *branch)
404 {
405 	if (branch) {
406 		zfree(&branch->cycles_hist);
407 		free(branch);
408 	}
409 }
410 
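/*
 * For the basic block [start, end]: compute IPC as the number of
 * instructions divided by the average cycles (ch->cycles / ch->num),
 * attach it to every covered line that doesn't have one yet, and
 * accumulate the coverage stats in notes->branch.
 */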
411 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
412 {
413 	unsigned n_insn;
414 	unsigned int cover_insn = 0;
415 
416 	n_insn = annotation__count_insn(notes, start, end);
417 	if (n_insn && ch->num && ch->cycles) {
418 		struct annotation_line *al;
419 		struct annotated_branch *branch;
420 		float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
421 
422 		/* Hide data when there are too many overlaps. */
423 		if (ch->reset >= 0x7fff)
424 			return;
425 
426 		al = annotated_source__get_line(notes->src, start);
427 		if (al == NULL)
428 			return;
429 
430 		list_for_each_entry_from(al, &notes->src->source, node) {
431 			if (al->offset == -1)
432 				continue;
433 			if ((u64)al->offset > end)
434 				break;
435 			if (al->cycles && al->cycles->ipc == 0.0) {
436 				al->cycles->ipc = ipc;
437 				cover_insn++;
438 			}
439 		}
440 
441 		branch = annotation__get_branch(notes);
442 		if (cover_insn && branch) {
443 			branch->hit_cycles += ch->cycles;
444 			branch->hit_insn += n_insn * ch->num;
445 			branch->cover_insn += cover_insn;
446 		}
447 	}
448 }
449 
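/*
 * Walk the cycles histogram from the end of the symbol backwards,
 * attaching average/min/max cycle counts to each line and filling in
 * per-block IPC via annotation__count_and_fill().  On allocation
 * failure, the cycles info attached so far is freed again.
 */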
450 static int annotation__compute_ipc(struct annotation *notes, size_t size)
451 {
452 	int err = 0;
453 	s64 offset;
454 
455 	if (!notes->branch || !notes->branch->cycles_hist)
456 		return 0;
457 
458 	notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
459 	notes->branch->hit_cycles = 0;
460 	notes->branch->hit_insn = 0;
461 	notes->branch->cover_insn = 0;
462 
463 	annotation__lock(notes);
464 	for (offset = size - 1; offset >= 0; --offset) {
465 		struct cyc_hist *ch;
466 
467 		ch = &notes->branch->cycles_hist[offset];
468 		if (ch && ch->cycles) {
469 			struct annotation_line *al;
470 
471 			al = annotated_source__get_line(notes->src, offset);
472 			if (al && al->cycles == NULL) {
473 				al->cycles = zalloc(sizeof(*al->cycles));
474 				if (al->cycles == NULL) {
475 					err = ENOMEM;
476 					break;
477 				}
478 			}
479 			if (ch->have_start)
480 				annotation__count_and_fill(notes, ch->start, offset, ch);
481 			if (al && ch->num_aggr) {
482 				al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
483 				al->cycles->max = ch->cycles_max;
484 				al->cycles->min = ch->cycles_min;
485 			}
486 		}
487 	}
488 
489 	if (err) {
490 		while (++offset < (s64)size) {
491 			struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
492 
493 			if (ch && ch->cycles) {
494 				struct annotation_line *al;
495 
496 				al = annotated_source__get_line(notes->src, offset);
497 				if (al)
498 					zfree(&al->cycles);
499 			}
500 		}
501 	}
502 
503 	annotation__unlock(notes);
504 	return 0;
505 }
506 
507 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
508 				 struct evsel *evsel)
509 {
510 	return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
511 }
512 
513 int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
514 				 struct evsel *evsel, u64 ip)
515 {
516 	return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
517 }
518 
519 
520 void annotation__exit(struct annotation *notes)
521 {
522 	annotated_source__delete(notes->src);
523 	annotated_branch__delete(notes->branch);
524 }
525 
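/*
 * struct annotation carries no mutex of its own.  Instead the
 * annotation pointer is hashed into a pool of mutexes (one per CPU),
 * created on first use.  Typical usage:
 *
 *	annotation__lock(notes);
 *	... update notes->src / notes->branch ...
 *	annotation__unlock(notes);
 */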
526 static struct sharded_mutex *sharded_mutex;
527 
528 static void annotation__init_sharded_mutex(void)
529 {
530 	/* As many mutexes as there are CPUs. */
531 	sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
532 }
533 
534 static size_t annotation__hash(const struct annotation *notes)
535 {
536 	return (size_t)notes;
537 }
538 
539 static struct mutex *annotation__get_mutex(const struct annotation *notes)
540 {
541 	static pthread_once_t once = PTHREAD_ONCE_INIT;
542 
543 	pthread_once(&once, annotation__init_sharded_mutex);
544 	if (!sharded_mutex)
545 		return NULL;
546 
547 	return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
548 }
549 
550 void annotation__lock(struct annotation *notes)
551 	NO_THREAD_SAFETY_ANALYSIS
552 {
553 	struct mutex *mutex = annotation__get_mutex(notes);
554 
555 	if (mutex)
556 		mutex_lock(mutex);
557 }
558 
559 void annotation__unlock(struct annotation *notes)
560 	NO_THREAD_SAFETY_ANALYSIS
561 {
562 	struct mutex *mutex = annotation__get_mutex(notes);
563 
564 	if (mutex)
565 		mutex_unlock(mutex);
566 }
567 
568 bool annotation__trylock(struct annotation *notes)
569 {
570 	struct mutex *mutex = annotation__get_mutex(notes);
571 
572 	if (!mutex)
573 		return false;
574 
575 	return mutex_trylock(mutex);
576 }
577 
578 void annotation_line__add(struct annotation_line *al, struct list_head *head)
579 {
580 	list_add_tail(&al->node, head);
581 }
582 
583 struct annotation_line *
584 annotation_line__next(struct annotation_line *pos, struct list_head *head)
585 {
586 	list_for_each_entry_continue(pos, head, node)
587 		if (pos->offset >= 0)
588 			return pos;
589 
590 	return NULL;
591 }
592 
593 static const char *annotate__address_color(struct block_range *br)
594 {
595 	double cov = block_range__coverage(br);
596 
597 	if (cov >= 0) {
598 		/* mark red for >75% coverage */
599 		if (cov > 0.75)
600 			return PERF_COLOR_RED;
601 
602 		/* mark dull for <1% coverage */
603 		if (cov < 0.01)
604 			return PERF_COLOR_NORMAL;
605 	}
606 
607 	return PERF_COLOR_MAGENTA;
608 }
609 
610 static const char *annotate__asm_color(struct block_range *br)
611 {
612 	double cov = block_range__coverage(br);
613 
614 	if (cov >= 0) {
615 		/* mark dull for <1% coverage */
616 		if (cov < 0.01)
617 			return PERF_COLOR_NORMAL;
618 	}
619 
620 	return PERF_COLOR_BLUE;
621 }
622 
623 static void annotate__branch_printf(struct block_range *br, u64 addr)
624 {
625 	bool emit_comment = true;
626 
627 	if (!br)
628 		return;
629 
630 #if 1
631 	if (br->is_target && br->start == addr) {
632 		struct block_range *branch = br;
633 		double p;
634 
635 		/*
636 		 * Find matching branch to our target.
637 		 */
638 		while (!branch->is_branch)
639 			branch = block_range__next(branch);
640 
641 		p = 100 *(double)br->entry / branch->coverage;
642 
643 		if (p > 0.1) {
644 			if (emit_comment) {
645 				emit_comment = false;
646 				printf("\t#");
647 			}
648 
649 			/*
650 			 * The percentage of coverage that joins at this target,
651 			 * relative to the next branch.
652 			 */
653 			printf(" +%.2f%%", p);
654 		}
655 	}
656 #endif
657 	if (br->is_branch && br->end == addr) {
658 		double p = 100*(double)br->taken / br->coverage;
659 
660 		if (p > 0.1) {
661 			if (emit_comment) {
662 				emit_comment = false;
663 				printf("\t#");
664 			}
665 
666 			/*
667 			 * The percentage of coverage leaving at this branch, and
668 			 * its prediction ratio.
669 			 */
670 			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred  / br->taken);
671 		}
672 	}
673 }
674 
675 static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
676 {
677 	s64 offset = dl->al.offset;
678 	const u64 addr = start + offset;
679 	struct block_range *br;
680 
681 	br = block_range__find(addr);
682 	color_fprintf(stdout, annotate__address_color(br), "  %*" PRIx64 ":", addr_fmt_width, addr);
683 	color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
684 	annotate__branch_printf(br, addr);
685 	return 0;
686 }
687 
688 static int
689 annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
690 		       struct evsel *evsel, u64 len, int min_pcnt, int printed,
691 		       int max_lines, struct annotation_line *queue, int addr_fmt_width,
692 		       int percent_type)
693 {
694 	struct disasm_line *dl = container_of(al, struct disasm_line, al);
695 	static const char *prev_line;
696 
697 	if (al->offset != -1) {
698 		double max_percent = 0.0;
699 		int i, nr_percent = 1;
700 		const char *color;
701 		struct annotation *notes = symbol__annotation(sym);
702 
703 		for (i = 0; i < al->data_nr; i++) {
704 			double percent;
705 
706 			percent = annotation_data__percent(&al->data[i],
707 							   percent_type);
708 
709 			if (percent > max_percent)
710 				max_percent = percent;
711 		}
712 
713 		if (al->data_nr > nr_percent)
714 			nr_percent = al->data_nr;
715 
716 		if (max_percent < min_pcnt)
717 			return -1;
718 
719 		if (max_lines && printed >= max_lines)
720 			return 1;
721 
722 		if (queue != NULL) {
723 			list_for_each_entry_from(queue, &notes->src->source, node) {
724 				if (queue == al)
725 					break;
726 				annotation_line__print(queue, sym, start, evsel, len,
727 						       0, 0, 1, NULL, addr_fmt_width,
728 						       percent_type);
729 			}
730 		}
731 
732 		color = get_percent_color(max_percent);
733 
734 		for (i = 0; i < nr_percent; i++) {
735 			struct annotation_data *data = &al->data[i];
736 			double percent;
737 
738 			percent = annotation_data__percent(data, percent_type);
739 			color = get_percent_color(percent);
740 
741 			if (symbol_conf.show_total_period)
742 				color_fprintf(stdout, color, " %11" PRIu64,
743 					      data->he.period);
744 			else if (symbol_conf.show_nr_samples)
745 				color_fprintf(stdout, color, " %7" PRIu64,
746 					      data->he.nr_samples);
747 			else
748 				color_fprintf(stdout, color, " %7.2f", percent);
749 		}
750 
751 		printf(" : ");
752 
753 		disasm_line__print(dl, start, addr_fmt_width);
754 
755 		/*
756 		 * Also color the filename and line if needed, with
757 		 * the same color as the percentage. Don't print it
758 		 * twice for nearby colored addresses with the same filename:line.
759 		 */
760 		if (al->path) {
761 			if (!prev_line || strcmp(prev_line, al->path)) {
762 				color_fprintf(stdout, color, " // %s", al->path);
763 				prev_line = al->path;
764 			}
765 		}
766 
767 		printf("\n");
768 	} else if (max_lines && printed >= max_lines)
769 		return 1;
770 	else {
771 		int width = symbol_conf.show_total_period ? 12 : 8;
772 
773 		if (queue)
774 			return -1;
775 
776 		if (evsel__is_group_event(evsel))
777 			width *= evsel->core.nr_members;
778 
779 		if (!*al->line)
780 			printf(" %*s:\n", width, " ");
781 		else
782 			printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
783 	}
784 
785 	return 0;
786 }
787 
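/*
 * Sum the samples recorded for [offset, end) and turn them into the
 * four PERCENT_* flavors: hits/period relative to the symbol's own
 * totals (local) and to the evsel's hists stats (global).
 */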
788 static void calc_percent(struct annotation *notes,
789 			 struct evsel *evsel,
790 			 struct annotation_data *data,
791 			 s64 offset, s64 end)
792 {
793 	struct hists *hists = evsel__hists(evsel);
794 	int evidx = evsel->core.idx;
795 	struct sym_hist *sym_hist = annotation__histogram(notes, evidx);
796 	unsigned int hits = 0;
797 	u64 period = 0;
798 
799 	while (offset < end) {
800 		struct sym_hist_entry *entry;
801 
802 		entry = annotated_source__hist_entry(notes->src, evidx, offset);
803 		if (entry) {
804 			hits   += entry->nr_samples;
805 			period += entry->period;
806 		}
807 		++offset;
808 	}
809 
810 	if (sym_hist->nr_samples) {
811 		data->he.period     = period;
812 		data->he.nr_samples = hits;
813 		data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
814 	}
815 
816 	if (hists->stats.nr_non_filtered_samples)
817 		data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
818 
819 	if (sym_hist->period)
820 		data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
821 
822 	if (hists->stats.total_period)
823 		data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
824 }
825 
826 static void annotation__calc_percent(struct annotation *notes,
827 				     struct evsel *leader, s64 len)
828 {
829 	struct annotation_line *al, *next;
830 	struct evsel *evsel;
831 
832 	list_for_each_entry(al, &notes->src->source, node) {
833 		s64 end;
834 		int i = 0;
835 
836 		if (al->offset == -1)
837 			continue;
838 
839 		next = annotation_line__next(al, &notes->src->source);
840 		end  = next ? next->offset : len;
841 
842 		for_each_group_evsel(evsel, leader) {
843 			struct annotation_data *data;
844 
845 			BUG_ON(i >= al->data_nr);
846 
847 			data = &al->data[i++];
848 
849 			calc_percent(notes, evsel, data, al->offset, end);
850 		}
851 	}
852 }
853 
854 void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
855 {
856 	struct annotation *notes = symbol__annotation(sym);
857 
858 	annotation__calc_percent(notes, evsel, symbol__size(sym));
859 }
860 
861 static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
862 {
863 	struct perf_env *env = evsel__env(evsel);
864 	const char *arch_name = perf_env__arch(env);
865 	struct arch *arch;
866 	int err;
867 
868 	if (!arch_name) {
869 		*parch = NULL;
870 		return errno;
871 	}
872 
873 	*parch = arch = arch__find(arch_name);
874 	if (arch == NULL) {
875 		pr_err("%s: unsupported arch %s\n", __func__, arch_name);
876 		return ENOTSUP;
877 	}
878 
879 	if (arch->init) {
880 		err = arch->init(arch, env ? env->cpuid : NULL);
881 		if (err) {
882 			pr_err("%s: failed to initialize %s arch priv area\n",
883 			       __func__, arch->name);
884 			return err;
885 		}
886 	}
887 	return 0;
888 }
889 
890 int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
891 		     struct arch **parch)
892 {
893 	struct symbol *sym = ms->sym;
894 	struct annotation *notes = symbol__annotation(sym);
895 	struct annotate_args args = {
896 		.evsel		= evsel,
897 		.options	= &annotate_opts,
898 	};
899 	struct arch *arch = NULL;
900 	int err;
901 
902 	err = evsel__get_arch(evsel, &arch);
903 	if (err < 0)
904 		return err;
905 
906 	if (parch)
907 		*parch = arch;
908 
909 	if (!list_empty(&notes->src->source))
910 		return 0;
911 
912 	args.arch = arch;
913 	args.ms = *ms;
914 
915 	if (notes->src == NULL) {
916 		notes->src = annotated_source__new();
917 		if (notes->src == NULL)
918 			return -1;
919 	}
920 
921 	if (annotate_opts.full_addr)
922 		notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
923 	else
924 		notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
925 
926 	return symbol__disassemble(sym, &args);
927 }
928 
929 static void insert_source_line(struct rb_root *root, struct annotation_line *al)
930 {
931 	struct annotation_line *iter;
932 	struct rb_node **p = &root->rb_node;
933 	struct rb_node *parent = NULL;
934 	unsigned int percent_type = annotate_opts.percent_type;
935 	int i, ret;
936 
937 	while (*p != NULL) {
938 		parent = *p;
939 		iter = rb_entry(parent, struct annotation_line, rb_node);
940 
941 		ret = strcmp(iter->path, al->path);
942 		if (ret == 0) {
943 			for (i = 0; i < al->data_nr; i++) {
944 				iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
945 										      percent_type);
946 			}
947 			return;
948 		}
949 
950 		if (ret < 0)
951 			p = &(*p)->rb_left;
952 		else
953 			p = &(*p)->rb_right;
954 	}
955 
956 	for (i = 0; i < al->data_nr; i++) {
957 		al->data[i].percent_sum = annotation_data__percent(&al->data[i],
958 								   percent_type);
959 	}
960 
961 	rb_link_node(&al->rb_node, parent, p);
962 	rb_insert_color(&al->rb_node, root);
963 }
964 
965 static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
966 {
967 	int i;
968 
969 	for (i = 0; i < a->data_nr; i++) {
970 		if (a->data[i].percent_sum == b->data[i].percent_sum)
971 			continue;
972 		return a->data[i].percent_sum > b->data[i].percent_sum;
973 	}
974 
975 	return 0;
976 }
977 
978 static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
979 {
980 	struct annotation_line *iter;
981 	struct rb_node **p = &root->rb_node;
982 	struct rb_node *parent = NULL;
983 
984 	while (*p != NULL) {
985 		parent = *p;
986 		iter = rb_entry(parent, struct annotation_line, rb_node);
987 
988 		if (cmp_source_line(al, iter))
989 			p = &(*p)->rb_left;
990 		else
991 			p = &(*p)->rb_right;
992 	}
993 
994 	rb_link_node(&al->rb_node, parent, p);
995 	rb_insert_color(&al->rb_node, root);
996 }
997 
998 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
999 {
1000 	struct annotation_line *al;
1001 	struct rb_node *node;
1002 
1003 	node = rb_first(src_root);
1004 	while (node) {
1005 		struct rb_node *next;
1006 
1007 		al = rb_entry(node, struct annotation_line, rb_node);
1008 		next = rb_next(node);
1009 		rb_erase(node, src_root);
1010 
1011 		__resort_source_line(dest_root, al);
1012 		node = next;
1013 	}
1014 }
1015 
1016 static void print_summary(struct rb_root *root, const char *filename)
1017 {
1018 	struct annotation_line *al;
1019 	struct rb_node *node;
1020 
1021 	printf("\nSorted summary for file %s\n", filename);
1022 	printf("----------------------------------------------\n\n");
1023 
1024 	if (RB_EMPTY_ROOT(root)) {
1025 		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1026 		return;
1027 	}
1028 
1029 	node = rb_first(root);
1030 	while (node) {
1031 		double percent, percent_max = 0.0;
1032 		const char *color;
1033 		char *path;
1034 		int i;
1035 
1036 		al = rb_entry(node, struct annotation_line, rb_node);
1037 		for (i = 0; i < al->data_nr; i++) {
1038 			percent = al->data[i].percent_sum;
1039 			color = get_percent_color(percent);
1040 			color_fprintf(stdout, color, " %7.2f", percent);
1041 
1042 			if (percent > percent_max)
1043 				percent_max = percent;
1044 		}
1045 
1046 		path = al->path;
1047 		color = get_percent_color(percent_max);
1048 		color_fprintf(stdout, color, " %s\n", path);
1049 
1050 		node = rb_next(node);
1051 	}
1052 }
1053 
1054 static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
1055 {
1056 	int evidx = evsel->core.idx;
1057 	struct annotation *notes = symbol__annotation(sym);
1058 	struct sym_hist *h = annotation__histogram(notes, evidx);
1059 	u64 len = symbol__size(sym), offset;
1060 
1061 	for (offset = 0; offset < len; ++offset) {
1062 		struct sym_hist_entry *entry;
1063 
1064 		entry = annotated_source__hist_entry(notes->src, evidx, offset);
1065 		if (entry && entry->nr_samples != 0)
1066 			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
1067 			       sym->start + offset, entry->nr_samples);
1068 	}
1069 	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
1070 }
1071 
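/*
 * Width (in hex digits) needed to print the highest instruction
 * address, taken from the last line that carries an offset.
 */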
1072 static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
1073 {
1074 	char bf[32];
1075 	struct annotation_line *line;
1076 
1077 	list_for_each_entry_reverse(line, lines, node) {
1078 		if (line->offset != -1)
1079 			return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
1080 	}
1081 
1082 	return 0;
1083 }
1084 
1085 int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
1086 {
1087 	struct map *map = ms->map;
1088 	struct symbol *sym = ms->sym;
1089 	struct dso *dso = map__dso(map);
1090 	char *filename;
1091 	const char *d_filename;
1092 	const char *evsel_name = evsel__name(evsel);
1093 	struct annotation *notes = symbol__annotation(sym);
1094 	struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
1095 	struct annotation_line *pos, *queue = NULL;
1096 	struct annotation_options *opts = &annotate_opts;
1097 	u64 start = map__rip_2objdump(map, sym->start);
1098 	int printed = 2, queue_len = 0, addr_fmt_width;
1099 	int more = 0;
1100 	bool context = opts->context;
1101 	u64 len;
1102 	int width = symbol_conf.show_total_period ? 12 : 8;
1103 	int graph_dotted_len;
1104 	char buf[512];
1105 
1106 	filename = strdup(dso->long_name);
1107 	if (!filename)
1108 		return -ENOMEM;
1109 
1110 	if (opts->full_path)
1111 		d_filename = filename;
1112 	else
1113 		d_filename = basename(filename);
1114 
1115 	len = symbol__size(sym);
1116 
1117 	if (evsel__is_group_event(evsel)) {
1118 		width *= evsel->core.nr_members;
1119 		evsel__group_desc(evsel, buf, sizeof(buf));
1120 		evsel_name = buf;
1121 	}
1122 
1123 	graph_dotted_len = printf(" %-*.*s|	Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
1124 				  "percent: %s)\n",
1125 				  width, width, symbol_conf.show_total_period ? "Period" :
1126 				  symbol_conf.show_nr_samples ? "Samples" : "Percent",
1127 				  d_filename, evsel_name, h->nr_samples,
1128 				  percent_type_str(opts->percent_type));
1129 
1130 	printf("%-*.*s----\n",
1131 	       graph_dotted_len, graph_dotted_len, graph_dotted_line);
1132 
1133 	if (verbose > 0)
1134 		symbol__annotate_hits(sym, evsel);
1135 
1136 	addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
1137 
1138 	list_for_each_entry(pos, &notes->src->source, node) {
1139 		int err;
1140 
1141 		if (context && queue == NULL) {
1142 			queue = pos;
1143 			queue_len = 0;
1144 		}
1145 
1146 		err = annotation_line__print(pos, sym, start, evsel, len,
1147 					     opts->min_pcnt, printed, opts->max_lines,
1148 					     queue, addr_fmt_width, opts->percent_type);
1149 
1150 		switch (err) {
1151 		case 0:
1152 			++printed;
1153 			if (context) {
1154 				printed += queue_len;
1155 				queue = NULL;
1156 				queue_len = 0;
1157 			}
1158 			break;
1159 		case 1:
1160 			/* filtered by max_lines */
1161 			++more;
1162 			break;
1163 		case -1:
1164 		default:
1165 			/*
1166 			 * Filtered by min_pcnt or non-IP lines when
1167 			 * context != 0.
1168 			 */
1169 			if (!context)
1170 				break;
1171 			if (queue_len == context)
1172 				queue = list_entry(queue->node.next, typeof(*queue), node);
1173 			else
1174 				++queue_len;
1175 			break;
1176 		}
1177 	}
1178 
1179 	free(filename);
1180 
1181 	return more;
1182 }
1183 
1184 static void FILE__set_percent_color(void *fp __maybe_unused,
1185 				    double percent __maybe_unused,
1186 				    bool current __maybe_unused)
1187 {
1188 }
1189 
1190 static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
1191 					 int nr __maybe_unused, bool current __maybe_unused)
1192 {
1193 	return 0;
1194 }
1195 
1196 static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
1197 {
1198 	return 0;
1199 }
1200 
1201 static void FILE__printf(void *fp, const char *fmt, ...)
1202 {
1203 	va_list args;
1204 
1205 	va_start(args, fmt);
1206 	vfprintf(fp, fmt, args);
1207 	va_end(args);
1208 }
1209 
1210 static void FILE__write_graph(void *fp, int graph)
1211 {
1212 	const char *s;
1213 	switch (graph) {
1214 
1215 	case DARROW_CHAR: s = "↓"; break;
1216 	case UARROW_CHAR: s = "↑"; break;
1217 	case LARROW_CHAR: s = "←"; break;
1218 	case RARROW_CHAR: s = "→"; break;
1219 	default:		s = "?"; break;
1220 	}
1221 
1222 	fputs(s, fp);
1223 }
1224 
1225 static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
1226 {
1227 	struct annotation *notes = symbol__annotation(sym);
1228 	struct annotation_write_ops wops = {
1229 		.first_line		 = true,
1230 		.obj			 = fp,
1231 		.set_color		 = FILE__set_color,
1232 		.set_percent_color	 = FILE__set_percent_color,
1233 		.set_jumps_percent_color = FILE__set_jumps_percent_color,
1234 		.printf			 = FILE__printf,
1235 		.write_graph		 = FILE__write_graph,
1236 	};
1237 	struct annotation_line *al;
1238 
1239 	list_for_each_entry(al, &notes->src->source, node) {
1240 		if (annotation_line__filter(al))
1241 			continue;
1242 		annotation_line__write(al, notes, &wops);
1243 		fputc('\n', fp);
1244 		wops.first_line = false;
1245 	}
1246 
1247 	return 0;
1248 }
1249 
1250 int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
1251 {
1252 	const char *ev_name = evsel__name(evsel);
1253 	char buf[1024];
1254 	char *filename;
1255 	int err = -1;
1256 	FILE *fp;
1257 
1258 	if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
1259 		return -1;
1260 
1261 	fp = fopen(filename, "w");
1262 	if (fp == NULL)
1263 		goto out_free_filename;
1264 
1265 	if (evsel__is_group_event(evsel)) {
1266 		evsel__group_desc(evsel, buf, sizeof(buf));
1267 		ev_name = buf;
1268 	}
1269 
1270 	fprintf(fp, "%s() %s\nEvent: %s\n\n",
1271 		ms->sym->name, map__dso(ms->map)->long_name, ev_name);
1272 	symbol__annotate_fprintf2(ms->sym, fp);
1273 
1274 	fclose(fp);
1275 	err = 0;
1276 out_free_filename:
1277 	free(filename);
1278 	return err;
1279 }
1280 
1281 void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
1282 {
1283 	struct annotation *notes = symbol__annotation(sym);
1284 	struct sym_hist *h = annotation__histogram(notes, evidx);
1285 
1286 	memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
1287 }
1288 
1289 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
1290 {
1291 	struct annotation *notes = symbol__annotation(sym);
1292 	struct sym_hist *h = annotation__histogram(notes, evidx);
1293 	struct annotation_line *al;
1294 
1295 	h->nr_samples = 0;
1296 	list_for_each_entry(al, &notes->src->source, node) {
1297 		struct sym_hist_entry *entry;
1298 
1299 		if (al->offset == -1)
1300 			continue;
1301 
1302 		entry = annotated_source__hist_entry(notes->src, evidx, al->offset);
1303 		if (entry == NULL)
1304 			continue;
1305 
1306 		entry->nr_samples = entry->nr_samples * 7 / 8;
1307 		h->nr_samples += entry->nr_samples;
1308 	}
1309 }
1310 
1311 void annotated_source__purge(struct annotated_source *as)
1312 {
1313 	struct annotation_line *al, *n;
1314 
1315 	list_for_each_entry_safe(al, n, &as->source, node) {
1316 		list_del_init(&al->node);
1317 		disasm_line__free(disasm_line(al));
1318 	}
1319 }
1320 
1321 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1322 {
1323 	size_t printed;
1324 
1325 	if (dl->al.offset == -1)
1326 		return fprintf(fp, "%s\n", dl->al.line);
1327 
1328 	printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
1329 
1330 	if (dl->ops.raw[0] != '\0') {
1331 		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1332 				   dl->ops.raw);
1333 	}
1334 
1335 	return printed + fprintf(fp, "\n");
1336 }
1337 
1338 size_t disasm__fprintf(struct list_head *head, FILE *fp)
1339 {
1340 	struct disasm_line *pos;
1341 	size_t printed = 0;
1342 
1343 	list_for_each_entry(pos, head, al.node)
1344 		printed += disasm_line__fprintf(pos, fp);
1345 
1346 	return printed;
1347 }
1348 
1349 bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
1350 {
1351 	if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
1352 	    !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
1353 	    dl->ops.target.offset >= (s64)symbol__size(sym))
1354 		return false;
1355 
1356 	return true;
1357 }
1358 
1359 static void
1360 annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
1361 {
1362 	struct annotation_line *al;
1363 
1364 	/* PLT symbols contain external offsets */
1365 	if (strstr(sym->name, "@plt"))
1366 		return;
1367 
1368 	list_for_each_entry(al, &notes->src->source, node) {
1369 		struct disasm_line *dl;
1370 		struct annotation_line *target;
1371 
1372 		dl = disasm_line(al);
1373 
1374 		if (!disasm_line__is_valid_local_jump(dl, sym))
1375 			continue;
1376 
1377 		target = annotated_source__get_line(notes->src,
1378 						    dl->ops.target.offset);
1379 		/*
1380 		 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
1381 		 * have to adjust to the previous offset?
1382 		 */
1383 		if (target == NULL)
1384 			continue;
1385 
1386 		if (++target->jump_sources > notes->src->max_jump_sources)
1387 			notes->src->max_jump_sources = target->jump_sources;
1388 	}
1389 }
1390 
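/*
 * Give every line a display index (idx), a separate index over asm
 * lines only (idx_asm, -1 for source lines), and record the longest
 * line for column sizing.
 */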
1391 static void annotation__set_index(struct annotation *notes)
1392 {
1393 	struct annotation_line *al;
1394 	struct annotated_source *src = notes->src;
1395 
1396 	src->widths.max_line_len = 0;
1397 	src->nr_entries = 0;
1398 	src->nr_asm_entries = 0;
1399 
1400 	list_for_each_entry(al, &src->source, node) {
1401 		size_t line_len = strlen(al->line);
1402 
1403 		if (src->widths.max_line_len < line_len)
1404 			src->widths.max_line_len = line_len;
1405 		al->idx = src->nr_entries++;
1406 		if (al->offset != -1)
1407 			al->idx_asm = src->nr_asm_entries++;
1408 		else
1409 			al->idx_asm = -1;
1410 	}
1411 }
1412 
1413 static inline int width_jumps(int n)
1414 {
1415 	if (n >= 100)
1416 		return 5;
1417 	if (n / 10)
1418 		return 2;
1419 	return 1;
1420 }
1421 
1422 static int annotation__max_ins_name(struct annotation *notes)
1423 {
1424 	int max_name = 0, len;
1425 	struct annotation_line *al;
1426 
1427 	list_for_each_entry(al, &notes->src->source, node) {
1428 		if (al->offset == -1)
1429 			continue;
1430 
1431 		len = strlen(disasm_line(al)->ins.name);
1432 		if (max_name < len)
1433 			max_name = len;
1434 	}
1435 
1436 	return max_name;
1437 }
1438 
1439 static void
1440 annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
1441 {
1442 	notes->src->widths.addr = notes->src->widths.target =
1443 		notes->src->widths.min_addr = hex_width(symbol__size(sym));
1444 	notes->src->widths.max_addr = hex_width(sym->end);
1445 	notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources);
1446 	notes->src->widths.max_ins_name = annotation__max_ins_name(notes);
1447 }
1448 
1449 void annotation__update_column_widths(struct annotation *notes)
1450 {
1451 	if (annotate_opts.use_offset)
1452 		notes->src->widths.target = notes->src->widths.min_addr;
1453 	else if (annotate_opts.full_addr)
1454 		notes->src->widths.target = BITS_PER_LONG / 4;
1455 	else
1456 		notes->src->widths.target = notes->src->widths.max_addr;
1457 
1458 	notes->src->widths.addr = notes->src->widths.target;
1459 
1460 	if (annotate_opts.show_nr_jumps)
1461 		notes->src->widths.addr += notes->src->widths.jumps + 1;
1462 }
1463 
1464 void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
1465 {
1466 	annotate_opts.full_addr = !annotate_opts.full_addr;
1467 
1468 	if (annotate_opts.full_addr)
1469 		notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1470 	else
1471 		notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1472 
1473 	annotation__update_column_widths(notes);
1474 }
1475 
1476 static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
1477 				   struct rb_root *root)
1478 {
1479 	struct annotation_line *al;
1480 	struct rb_root tmp_root = RB_ROOT;
1481 
1482 	list_for_each_entry(al, &notes->src->source, node) {
1483 		double percent_max = 0.0;
1484 		u64 addr;
1485 		int i;
1486 
1487 		for (i = 0; i < al->data_nr; i++) {
1488 			double percent;
1489 
1490 			percent = annotation_data__percent(&al->data[i],
1491 							   annotate_opts.percent_type);
1492 
1493 			if (percent > percent_max)
1494 				percent_max = percent;
1495 		}
1496 
1497 		if (percent_max <= 0.5)
1498 			continue;
1499 
1500 		addr = map__rip_2objdump(ms->map, ms->sym->start);
1501 		al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
1502 				       false, true, ms->sym->start + al->offset);
1503 		insert_source_line(&tmp_root, al);
1504 	}
1505 
1506 	resort_source_line(root, &tmp_root);
1507 }
1508 
1509 static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
1510 {
1511 	struct annotation *notes = symbol__annotation(ms->sym);
1512 
1513 	annotation__calc_lines(notes, ms, root);
1514 }
1515 
1516 int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
1517 {
1518 	struct dso *dso = map__dso(ms->map);
1519 	struct symbol *sym = ms->sym;
1520 	struct rb_root source_line = RB_ROOT;
1521 	struct hists *hists = evsel__hists(evsel);
1522 	char buf[1024];
1523 	int err;
1524 
1525 	err = symbol__annotate2(ms, evsel, NULL);
1526 	if (err) {
1527 		char msg[BUFSIZ];
1528 
1529 		dso->annotate_warned = true;
1530 		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1531 		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1532 		return -1;
1533 	}
1534 
1535 	if (annotate_opts.print_lines) {
1536 		srcline_full_filename = annotate_opts.full_path;
1537 		symbol__calc_lines(ms, &source_line);
1538 		print_summary(&source_line, dso->long_name);
1539 	}
1540 
1541 	hists__scnprintf_title(hists, buf, sizeof(buf));
1542 	fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
1543 		buf, percent_type_str(annotate_opts.percent_type), sym->name,
1544 		dso->long_name);
1545 	symbol__annotate_fprintf2(sym, stdout);
1546 
1547 	annotated_source__purge(symbol__annotation(sym)->src);
1548 
1549 	return 0;
1550 }
1551 
1552 int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
1553 {
1554 	struct dso *dso = map__dso(ms->map);
1555 	struct symbol *sym = ms->sym;
1556 	struct rb_root source_line = RB_ROOT;
1557 	int err;
1558 
1559 	err = symbol__annotate(ms, evsel, NULL);
1560 	if (err) {
1561 		char msg[BUFSIZ];
1562 
1563 		dso->annotate_warned = true;
1564 		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
1565 		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
1566 		return -1;
1567 	}
1568 
1569 	symbol__calc_percent(sym, evsel);
1570 
1571 	if (annotate_opts.print_lines) {
1572 		srcline_full_filename = annotate_opts.full_path;
1573 		symbol__calc_lines(ms, &source_line);
1574 		print_summary(&source_line, dso->long_name);
1575 	}
1576 
1577 	symbol__annotate_printf(ms, evsel);
1578 
1579 	annotated_source__purge(symbol__annotation(sym)->src);
1580 
1581 	return 0;
1582 }
1583 
1584 bool ui__has_annotation(void)
1585 {
1586 	return use_browser == 1 && perf_hpp_list.sym;
1587 }
1588 
1589 
1590 static double annotation_line__max_percent(struct annotation_line *al,
1591 					   struct annotation *notes,
1592 					   unsigned int percent_type)
1593 {
1594 	double percent_max = 0.0;
1595 	int i;
1596 
1597 	for (i = 0; i < notes->src->nr_events; i++) {
1598 		double percent;
1599 
1600 		percent = annotation_data__percent(&al->data[i],
1601 						   percent_type);
1602 
1603 		if (percent > percent_max)
1604 			percent_max = percent;
1605 	}
1606 
1607 	return percent_max;
1608 }
1609 
1610 static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
1611 			       void *obj, char *bf, size_t size,
1612 			       void (*obj__printf)(void *obj, const char *fmt, ...),
1613 			       void (*obj__write_graph)(void *obj, int graph))
1614 {
1615 	if (dl->ins.ops && dl->ins.ops->scnprintf) {
1616 		if (ins__is_jump(&dl->ins)) {
1617 			bool fwd;
1618 
1619 			if (dl->ops.target.outside)
1620 				goto call_like;
1621 			fwd = dl->ops.target.offset > dl->al.offset;
1622 			obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
1623 			obj__printf(obj, " ");
1624 		} else if (ins__is_call(&dl->ins)) {
1625 call_like:
1626 			obj__write_graph(obj, RARROW_CHAR);
1627 			obj__printf(obj, " ");
1628 		} else if (ins__is_ret(&dl->ins)) {
1629 			obj__write_graph(obj, LARROW_CHAR);
1630 			obj__printf(obj, " ");
1631 		} else {
1632 			obj__printf(obj, "  ");
1633 		}
1634 	} else {
1635 		obj__printf(obj, "  ");
1636 	}
1637 
1638 	disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
1639 			       notes->src->widths.max_ins_name);
1640 }
1641 
1642 static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
1643 {
1644 	double ipc = 0.0, coverage = 0.0;
1645 	struct annotated_branch *branch = annotation__get_branch(notes);
1646 
1647 	if (branch && branch->hit_cycles)
1648 		ipc = branch->hit_insn / ((double)branch->hit_cycles);
1649 
1650 	if (branch && branch->total_insn) {
1651 		coverage = branch->cover_insn * 100.0 /
1652 			((double)branch->total_insn);
1653 	}
1654 
1655 	scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
1656 		  ipc, coverage);
1657 }
1658 
1659 static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
1660 				     bool first_line, bool current_entry, bool change_color, int width,
1661 				     void *obj, unsigned int percent_type,
1662 				     int  (*obj__set_color)(void *obj, int color),
1663 				     void (*obj__set_percent_color)(void *obj, double percent, bool current),
1664 				     int  (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
1665 				     void (*obj__printf)(void *obj, const char *fmt, ...),
1666 				     void (*obj__write_graph)(void *obj, int graph))
1667 
1668 {
1669 	double percent_max = annotation_line__max_percent(al, notes, percent_type);
1670 	int pcnt_width = annotation__pcnt_width(notes),
1671 	    cycles_width = annotation__cycles_width(notes);
1672 	bool show_title = false;
1673 	char bf[256];
1674 	int printed;
1675 
1676 	if (first_line && (al->offset == -1 || percent_max == 0.0)) {
1677 		if (notes->branch && al->cycles) {
1678 			if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
1679 				show_title = true;
1680 		} else
1681 			show_title = true;
1682 	}
1683 
1684 	if (al->offset != -1 && percent_max != 0.0) {
1685 		int i;
1686 
1687 		for (i = 0; i < notes->src->nr_events; i++) {
1688 			double percent;
1689 
1690 			percent = annotation_data__percent(&al->data[i], percent_type);
1691 
1692 			obj__set_percent_color(obj, percent, current_entry);
1693 			if (symbol_conf.show_total_period) {
1694 				obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
1695 			} else if (symbol_conf.show_nr_samples) {
1696 				obj__printf(obj, "%6" PRIu64 " ",
1697 						   al->data[i].he.nr_samples);
1698 			} else {
1699 				obj__printf(obj, "%6.2f ", percent);
1700 			}
1701 		}
1702 	} else {
1703 		obj__set_percent_color(obj, 0, current_entry);
1704 
1705 		if (!show_title)
1706 			obj__printf(obj, "%-*s", pcnt_width, " ");
1707 		else {
1708 			obj__printf(obj, "%-*s", pcnt_width,
1709 					   symbol_conf.show_total_period ? "Period" :
1710 					   symbol_conf.show_nr_samples ? "Samples" : "Percent");
1711 		}
1712 	}
1713 
1714 	if (notes->branch) {
1715 		if (al->cycles && al->cycles->ipc)
1716 			obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
1717 		else if (!show_title)
1718 			obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
1719 		else
1720 			obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
1721 
1722 		if (!annotate_opts.show_minmax_cycle) {
1723 			if (al->cycles && al->cycles->avg)
1724 				obj__printf(obj, "%*" PRIu64 " ",
1725 					   ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
1726 			else if (!show_title)
1727 				obj__printf(obj, "%*s",
1728 					    ANNOTATION__CYCLES_WIDTH, " ");
1729 			else
1730 				obj__printf(obj, "%*s ",
1731 					    ANNOTATION__CYCLES_WIDTH - 1,
1732 					    "Cycle");
1733 		} else {
1734 			if (al->cycles) {
1735 				char str[32];
1736 
1737 				scnprintf(str, sizeof(str),
1738 					"%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
1739 					al->cycles->avg, al->cycles->min,
1740 					al->cycles->max);
1741 
1742 				obj__printf(obj, "%*s ",
1743 					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
1744 					    str);
1745 			} else if (!show_title)
1746 				obj__printf(obj, "%*s",
1747 					    ANNOTATION__MINMAX_CYCLES_WIDTH,
1748 					    " ");
1749 			else
1750 				obj__printf(obj, "%*s ",
1751 					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
1752 					    "Cycle(min/max)");
1753 		}
1754 
1755 		if (show_title && !*al->line) {
1756 			ipc_coverage_string(bf, sizeof(bf), notes);
1757 			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
1758 		}
1759 	}
1760 
1761 	obj__printf(obj, " ");
1762 
1763 	if (!*al->line)
1764 		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
1765 	else if (al->offset == -1) {
1766 		if (al->line_nr && annotate_opts.show_linenr)
1767 			printed = scnprintf(bf, sizeof(bf), "%-*d ",
1768 					    notes->src->widths.addr + 1, al->line_nr);
1769 		else
1770 			printed = scnprintf(bf, sizeof(bf), "%-*s  ",
1771 					    notes->src->widths.addr, " ");
1772 		obj__printf(obj, bf);
1773 		obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
1774 	} else {
1775 		u64 addr = al->offset;
1776 		int color = -1;
1777 
1778 		if (!annotate_opts.use_offset)
1779 			addr += notes->src->start;
1780 
1781 		if (!annotate_opts.use_offset) {
1782 			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
1783 		} else {
1784 			if (al->jump_sources &&
1785 			    annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
1786 				if (annotate_opts.show_nr_jumps) {
1787 					int prev;
1788 					printed = scnprintf(bf, sizeof(bf), "%*d ",
1789 							    notes->src->widths.jumps,
1790 							    al->jump_sources);
1791 					prev = obj__set_jumps_percent_color(obj, al->jump_sources,
1792 									    current_entry);
1793 					obj__printf(obj, bf);
1794 					obj__set_color(obj, prev);
1795 				}
1796 print_addr:
1797 				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
1798 						    notes->src->widths.target, addr);
1799 			} else if (ins__is_call(&disasm_line(al)->ins) &&
1800 				   annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
1801 				goto print_addr;
1802 			} else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
1803 				goto print_addr;
1804 			} else {
1805 				printed = scnprintf(bf, sizeof(bf), "%-*s  ",
1806 						    notes->src->widths.addr, " ");
1807 			}
1808 		}
1809 
1810 		if (change_color)
1811 			color = obj__set_color(obj, HE_COLORSET_ADDR);
1812 		obj__printf(obj, bf);
1813 		if (change_color)
1814 			obj__set_color(obj, color);
1815 
1816 		disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);
1817 
1818 		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
1819 	}
1820 
1821 }
1822 
1823 void annotation_line__write(struct annotation_line *al, struct annotation *notes,
1824 			    struct annotation_write_ops *wops)
1825 {
1826 	__annotation_line__write(al, notes, wops->first_line, wops->current_entry,
1827 				 wops->change_color, wops->width, wops->obj,
1828 				 annotate_opts.percent_type,
1829 				 wops->set_color, wops->set_percent_color,
1830 				 wops->set_jumps_percent_color, wops->printf,
1831 				 wops->write_graph);
1832 }
1833 
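/*
 * Full annotation pipeline: disassemble the symbol, compute per-event
 * percentages, index the lines, mark jump targets, compute IPC and the
 * column widths used for display.
 */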
1834 int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
1835 		      struct arch **parch)
1836 {
1837 	struct symbol *sym = ms->sym;
1838 	struct annotation *notes = symbol__annotation(sym);
1839 	size_t size = symbol__size(sym);
1840 	int nr_pcnt = 1, err;
1841 
1842 	if (evsel__is_group_event(evsel))
1843 		nr_pcnt = evsel->core.nr_members;
1844 
1845 	err = symbol__annotate(ms, evsel, parch);
1846 	if (err)
1847 		return err;
1848 
1849 	symbol__calc_percent(sym, evsel);
1850 
1851 	annotation__set_index(notes);
1852 	annotation__mark_jump_targets(notes, sym);
1853 
1854 	err = annotation__compute_ipc(notes, size);
1855 	if (err)
1856 		return err;
1857 
1858 	annotation__init_column_widths(notes, sym);
1859 	notes->src->nr_events = nr_pcnt;
1860 
1861 	annotation__update_column_widths(notes);
1862 	sym->annotate2 = 1;
1863 
1864 	return 0;
1865 }
1866 
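/*
 * perf_config() callback for the "annotate." section, i.e. a
 * perfconfig snippet such as:
 *
 *	[annotate]
 *		hide_src_code = true
 *		show_nr_jumps = true
 */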
1867 static int annotation__config(const char *var, const char *value, void *data)
1868 {
1869 	struct annotation_options *opt = data;
1870 
1871 	if (!strstarts(var, "annotate."))
1872 		return 0;
1873 
1874 	if (!strcmp(var, "annotate.offset_level")) {
1875 		perf_config_u8(&opt->offset_level, "offset_level", value);
1876 
1877 		if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
1878 			opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
1879 		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
1880 			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
1881 	} else if (!strcmp(var, "annotate.hide_src_code")) {
1882 		opt->hide_src_code = perf_config_bool("hide_src_code", value);
1883 	} else if (!strcmp(var, "annotate.jump_arrows")) {
1884 		opt->jump_arrows = perf_config_bool("jump_arrows", value);
1885 	} else if (!strcmp(var, "annotate.show_linenr")) {
1886 		opt->show_linenr = perf_config_bool("show_linenr", value);
1887 	} else if (!strcmp(var, "annotate.show_nr_jumps")) {
1888 		opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
1889 	} else if (!strcmp(var, "annotate.show_nr_samples")) {
1890 		symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
1891 								value);
1892 	} else if (!strcmp(var, "annotate.show_total_period")) {
1893 		symbol_conf.show_total_period = perf_config_bool("show_total_period",
1894 								value);
1895 	} else if (!strcmp(var, "annotate.use_offset")) {
1896 		opt->use_offset = perf_config_bool("use_offset", value);
1897 	} else if (!strcmp(var, "annotate.disassembler_style")) {
1898 		opt->disassembler_style = strdup(value);
1899 		if (!opt->disassembler_style) {
1900 			pr_err("Not enough memory for annotate.disassembler_style\n");
1901 			return -1;
1902 		}
1903 	} else if (!strcmp(var, "annotate.objdump")) {
1904 		opt->objdump_path = strdup(value);
1905 		if (!opt->objdump_path) {
1906 			pr_err("Not enough memory for annotate.objdump\n");
1907 			return -1;
1908 		}
1909 	} else if (!strcmp(var, "annotate.addr2line")) {
1910 		symbol_conf.addr2line_path = strdup(value);
1911 		if (!symbol_conf.addr2line_path) {
1912 			pr_err("Not enough memory for annotate.addr2line\n");
1913 			return -1;
1914 		}
1915 	} else if (!strcmp(var, "annotate.demangle")) {
1916 		symbol_conf.demangle = perf_config_bool("demangle", value);
1917 	} else if (!strcmp(var, "annotate.demangle_kernel")) {
1918 		symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
1919 	} else {
1920 		pr_debug("%s variable unknown, ignoring...\n", var);
1921 	}
1922 
1923 	return 0;
1924 }
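
/*
 * For reference, the options above map to perf-config entries in
 * ~/.perfconfig, e.g. (illustrative values):
 *
 *	[annotate]
 *		hide_src_code = false
 *		use_offset = true
 *		show_nr_jumps = false
 */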
1925 
1926 void annotation_options__init(void)
1927 {
1928 	struct annotation_options *opt = &annotate_opts;
1929 
1930 	memset(opt, 0, sizeof(*opt));
1931 
1932 	/* Default values. */
1933 	opt->use_offset = true;
1934 	opt->jump_arrows = true;
1935 	opt->annotate_src = true;
1936 	opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
1937 	opt->percent_type = PERCENT_PERIOD_LOCAL;
1938 }
1939 
1940 void annotation_options__exit(void)
1941 {
1942 	zfree(&annotate_opts.disassembler_style);
1943 	zfree(&annotate_opts.objdump_path);
1944 }
1945 
1946 void annotation_config__init(void)
1947 {
1948 	perf_config(annotation__config, &annotate_opts);
1949 }
1950 
1951 static unsigned int parse_percent_type(char *str1, char *str2)
1952 {
1953 	unsigned int type = (unsigned int) -1;
1954 
1955 	if (!strcmp("period", str1)) {
1956 		if (!strcmp("local", str2))
1957 			type = PERCENT_PERIOD_LOCAL;
1958 		else if (!strcmp("global", str2))
1959 			type = PERCENT_PERIOD_GLOBAL;
1960 	}
1961 
1962 	if (!strcmp("hits", str1)) {
1963 		if (!strcmp("local", str2))
1964 			type = PERCENT_HITS_LOCAL;
1965 		else if (!strcmp("global", str2))
1966 			type = PERCENT_HITS_GLOBAL;
1967 	}
1968 
1969 	return type;
1970 }
1971 
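/*
 * Parse the percent type string given on the command line (e.g. via
 * --percent-type): "local-period", "global-hits", etc.  The two parts may
 * come in either order, so both orders are tried with parse_percent_type()
 * above.
 */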
1972 int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
1973 				int unset __maybe_unused)
1974 {
1975 	unsigned int type;
1976 	char *str1, *str2;
1977 	int err = -1;
1978 
1979 	str1 = strdup(_str);
1980 	if (!str1)
1981 		return -ENOMEM;
1982 
1983 	str2 = strchr(str1, '-');
1984 	if (!str2)
1985 		goto out;
1986 
1987 	*str2++ = 0;
1988 
1989 	type = parse_percent_type(str1, str2);
1990 	if (type == (unsigned int) -1)
1991 		type = parse_percent_type(str2, str1);
1992 	if (type != (unsigned int) -1) {
1993 		annotate_opts.percent_type = type;
1994 		err = 0;
1995 	}
1996 
1997 out:
1998 	free(str1);
1999 	return err;
2000 }
2001 
2002 int annotate_check_args(void)
2003 {
2004 	struct annotation_options *args = &annotate_opts;
2005 
2006 	if (args->prefix_strip && !args->prefix) {
2007 		pr_err("--prefix-strip requires --prefix\n");
2008 		return -1;
2009 	}
2010 	return 0;
2011 }
2012 
2013 /*
2014  * Get the register number and access offset from the given instruction.
2015  * It assumes the AT&T x86 asm format like OFFSET(REG).  The parsing may
2016  * need to be revisited when other architectures are handled.
2017  * On success it returns 0 and fills @op_loc with the register(s) and offset.
2018  */
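/*
 * Example (illustrative): for "-0x10(%rbp)" it sets op_loc->offset to -0x10
 * and op_loc->reg1 to the DWARF register number of %rbp; when
 * op_loc->multi_regs is set, "8(%rbx,%rcx,4)" also fills op_loc->reg2 from
 * the second register (%rcx).
 */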
2019 static int extract_reg_offset(struct arch *arch, const char *str,
2020 			      struct annotated_op_loc *op_loc)
2021 {
2022 	char *p;
2023 	char *regname;
2024 
2025 	if (arch->objdump.register_char == 0)
2026 		return -1;
2027 
2028 	/*
2029 	 * The operand normally starts with the offset, but it's possible to
2030 	 * omit a zero offset in the asm, so 0(%rax) is the same as (%rax).
2031 	 *
2032 	 * However, it can also start with a segment register prefix like
2033 	 * %gs:0x18(%rbx).  In that case, skip over that part first.
2034 	 */
2035 	if (*str == arch->objdump.register_char) {
2036 		if (arch__is(arch, "x86")) {
2037 			/* FIXME: Handle other segment registers */
2038 			if (!strncmp(str, "%gs:", 4))
2039 				op_loc->segment = INSN_SEG_X86_GS;
2040 		}
2041 
2042 		while (*str && !isdigit(*str) &&
2043 		       *str != arch->objdump.memory_ref_char)
2044 			str++;
2045 	}
2046 
2047 	op_loc->offset = strtol(str, &p, 0);
2048 
2049 	p = strchr(p, arch->objdump.register_char);
2050 	if (p == NULL)
2051 		return -1;
2052 
2053 	regname = strdup(p);
2054 	if (regname == NULL)
2055 		return -1;
2056 
2057 	op_loc->reg1 = get_dwarf_regnum(regname, 0);
2058 	free(regname);
2059 
2060 	/* Get the second register */
2061 	if (op_loc->multi_regs) {
2062 		p = strchr(p + 1, arch->objdump.register_char);
2063 		if (p == NULL)
2064 			return -1;
2065 
2066 		regname = strdup(p);
2067 		if (regname == NULL)
2068 			return -1;
2069 
2070 		op_loc->reg2 = get_dwarf_regnum(regname, 0);
2071 		free(regname);
2072 	}
2073 	return 0;
2074 }
2075 
2076 /**
2077  * annotate_get_insn_location - Get location of instruction
2078  * @arch: the architecture info
2079  * @dl: the target instruction
2080  * @loc: a buffer to save the data
2081  *
2082  * Get detailed location info (register and offset) in the instruction.
2083  * It parses both the source and the target operands and records whether
2084  * each accesses a memory location.  The offset field is meaningful only
2085  * when the corresponding mem flag is set.  The reg2 field is meaningful
2086  * only when the multi_regs flag is set.
2087  *
2088  * Some examples on x86:
2089  *
2090  *   mov  (%rax), %rcx   # src_reg1 = rax, src_mem = 1, src_offset = 0
2091  *                       # dst_reg1 = rcx, dst_mem = 0
2092  *
2093  *   mov  0x18, %r8      # src_reg1 = -1, src_mem = 0
2094  *                       # dst_reg1 = r8, dst_mem = 0
2095  *
2096  *   mov  %rsi, 8(%rbx,%rcx,4)  # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
2097  *                              # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
2098  *                              # dst_multi_regs = 1, dst_offset = 8
2099  */
2100 int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
2101 			       struct annotated_insn_loc *loc)
2102 {
2103 	struct ins_operands *ops;
2104 	struct annotated_op_loc *op_loc;
2105 	int i;
2106 
2107 	if (ins__is_lock(&dl->ins))
2108 		ops = dl->ops.locked.ops;
2109 	else
2110 		ops = &dl->ops;
2111 
2112 	if (ops == NULL)
2113 		return -1;
2114 
2115 	memset(loc, 0, sizeof(*loc));
2116 
2117 	for_each_insn_op_loc(loc, i, op_loc) {
2118 		const char *insn_str = ops->source.raw;
2119 		bool multi_regs = ops->source.multi_regs;
2120 
2121 		if (i == INSN_OP_TARGET) {
2122 			insn_str = ops->target.raw;
2123 			multi_regs = ops->target.multi_regs;
2124 		}
2125 
2126 		/* Invalidate the register by default */
2127 		op_loc->reg1 = -1;
2128 		op_loc->reg2 = -1;
2129 
2130 		if (insn_str == NULL)
2131 			continue;
2132 
2133 		if (strchr(insn_str, arch->objdump.memory_ref_char)) {
2134 			op_loc->mem_ref = true;
2135 			op_loc->multi_regs = multi_regs;
2136 			extract_reg_offset(arch, insn_str, op_loc);
2137 		} else {
2138 			char *s, *p = NULL;
2139 
2140 			if (arch__is(arch, "x86")) {
2141 				/* FIXME: Handle other segment registers */
2142 				if (!strncmp(insn_str, "%gs:", 4)) {
2143 					op_loc->segment = INSN_SEG_X86_GS;
2144 					op_loc->offset = strtol(insn_str + 4,
2145 								&p, 0);
2146 					if (p && p != insn_str + 4)
2147 						op_loc->imm = true;
2148 					continue;
2149 				}
2150 			}
2151 
2152 			s = strdup(insn_str);
2153 			if (s == NULL)
2154 				return -1;
2155 
2156 			if (*s == arch->objdump.register_char)
2157 				op_loc->reg1 = get_dwarf_regnum(s, 0);
2158 			else if (*s == arch->objdump.imm_char) {
2159 				op_loc->offset = strtol(s + 1, &p, 0);
2160 				if (p && p != s + 1)
2161 					op_loc->imm = true;
2162 			}
2163 			free(s);
2164 		}
2165 	}
2166 
2167 	return 0;
2168 }
2169 
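/*
 * Find the disasm_line whose address matches @ip in the symbol's annotation.
 * With @allow_update, a bare "lock" line (as emitted by llvm-objdump) is
 * skipped by bumping @ip to the following line.
 */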
2170 static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
2171 					    bool allow_update)
2172 {
2173 	struct disasm_line *dl;
2174 	struct annotation *notes;
2175 
2176 	notes = symbol__annotation(sym);
2177 
2178 	list_for_each_entry(dl, &notes->src->source, al.node) {
2179 		if (dl->al.offset == -1)
2180 			continue;
2181 
2182 		if (sym->start + dl->al.offset == ip) {
2183 			/*
2184 			 * llvm-objdump places "lock" on a separate line and
2185 			 * in that case, we want to get the next line.
2186 			 */
2187 			if (ins__is_lock(&dl->ins) &&
2188 			    *dl->ops.raw == '\0' && allow_update) {
2189 				ip++;
2190 				continue;
2191 			}
2192 			return dl;
2193 		}
2194 	}
2195 	return NULL;
2196 }
2197 
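/*
 * Find the per-instruction stat entry with the given name in @head, or
 * allocate a new one and append it to the list.
 */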
2198 static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
2199 						      const char *name)
2200 {
2201 	struct annotated_item_stat *istat;
2202 
2203 	list_for_each_entry(istat, head, list) {
2204 		if (!strcmp(istat->name, name))
2205 			return istat;
2206 	}
2207 
2208 	istat = zalloc(sizeof(*istat));
2209 	if (istat == NULL)
2210 		return NULL;
2211 
2212 	istat->name = strdup(name);
2213 	if (istat->name == NULL) {
2214 		free(istat);
2215 		return NULL;
2216 	}
2217 
2218 	list_add_tail(&istat->list, head);
2219 	return istat;
2220 }
2221 
2222 static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
2223 {
2224 	if (arch__is(arch, "x86")) {
2225 		if (!strncmp(dl->ins.name, "push", 4) ||
2226 		    !strncmp(dl->ins.name, "pop", 3) ||
2227 		    !strncmp(dl->ins.name, "ret", 3))
2228 			return true;
2229 	}
2230 
2231 	return false;
2232 }
2233 
2234 static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
2235 {
2236 	/* On x86_64, %gs:40 is used for stack canary */
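	/* e.g. objdump shows it as "mov %gs:0x28,%rax" (0x28 == 40) - illustrative */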
2237 	if (arch__is(arch, "x86")) {
2238 		if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
2239 		    loc->offset == 40)
2240 			return true;
2241 	}
2242 
2243 	return false;
2244 }
2245 
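/*
 * Return the closest previous line that holds a real instruction
 * (al.offset != -1), skipping source/comment lines, or NULL if @curr is
 * already the first instruction.
 */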
2246 static struct disasm_line *
2247 annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
2248 {
2249 	struct list_head *sources = &notes->src->source;
2250 	struct disasm_line *prev;
2251 
2252 	if (curr == list_first_entry(sources, struct disasm_line, al.node))
2253 		return NULL;
2254 
2255 	prev = list_prev_entry(curr, al.node);
2256 	while (prev->al.offset == -1 &&
2257 	       prev != list_first_entry(sources, struct disasm_line, al.node))
2258 		prev = list_prev_entry(prev, al.node);
2259 
2260 	if (prev->al.offset == -1)
2261 		return NULL;
2262 
2263 	return prev;
2264 }
2265 
2266 static struct disasm_line *
2267 annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
2268 {
2269 	struct list_head *sources = &notes->src->source;
2270 	struct disasm_line *next;
2271 
2272 	if (curr == list_last_entry(sources, struct disasm_line, al.node))
2273 		return NULL;
2274 
2275 	next = list_next_entry(curr, al.node);
2276 	while (next->al.offset == -1 &&
2277 	       next != list_last_entry(sources, struct disasm_line, al.node))
2278 		next = list_next_entry(next, al.node);
2279 
2280 	if (next->al.offset == -1)
2281 		return NULL;
2282 
2283 	return next;
2284 }
2285 
2286 u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
2287 			struct disasm_line *dl)
2288 {
2289 	struct annotation *notes;
2290 	struct disasm_line *next;
2291 	u64 addr;
2292 
2293 	notes = symbol__annotation(ms->sym);
2294 	/*
2295 	 * PC-relative addressing starts from the next instruction address,
2296 	 * but the IP is for the current instruction.  Since disasm_line
2297 	 * doesn't have the instruction size, calculate it using the next
2298 	 * disasm_line.  If it's the last one, we can use symbol's end
2299 	 * address directly.
2300 	 */
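	/*
	 * Illustrative example: for a 7-byte "mov 0x1234(%rip),%rax", the
	 * referenced address is ip + 7 + 0x1234, then converted with
	 * map__rip_2objdump() below.
	 */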
2301 	next = annotation__next_asm_line(notes, dl);
2302 	if (next == NULL)
2303 		addr = ms->sym->end + offset;
2304 	else
2305 		addr = ip + (next->al.offset - dl->al.offset) + offset;
2306 
2307 	return map__rip_2objdump(ms->map, addr);
2308 }
2309 
2310 /**
2311  * hist_entry__get_data_type - find data type for given hist entry
2312  * @he: hist entry
2313  *
2314  * This function first annotates the instruction at @he->ip and extracts
2315  * register and offset info from it.  Then it searches the DWARF debug
2316  * info to get a variable and type information using the address, register,
2317  * and offset.
2318  */
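/*
 * Stack operations and stack-canary accesses have no real data type, so the
 * stackop_type and canary_type pseudo types are returned for them.
 */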
2319 struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
2320 {
2321 	struct map_symbol *ms = &he->ms;
2322 	struct evsel *evsel = hists_to_evsel(he->hists);
2323 	struct arch *arch;
2324 	struct disasm_line *dl;
2325 	struct annotated_insn_loc loc;
2326 	struct annotated_op_loc *op_loc;
2327 	struct annotated_data_type *mem_type;
2328 	struct annotated_item_stat *istat;
2329 	u64 ip = he->ip;
2330 	int i;
2331 
2332 	ann_data_stat.total++;
2333 
2334 	if (ms->map == NULL || ms->sym == NULL) {
2335 		ann_data_stat.no_sym++;
2336 		return NULL;
2337 	}
2338 
2339 	if (!symbol_conf.init_annotation) {
2340 		ann_data_stat.no_sym++;
2341 		return NULL;
2342 	}
2343 
2344 	/* Make sure it has the disasm of the function */
2345 	if (symbol__annotate(ms, evsel, &arch) < 0) {
2346 		ann_data_stat.no_insn++;
2347 		return NULL;
2348 	}
2349 
2350 	/*
2351 	 * Get the disasm_line to extract the operand location from the insn.
2352 	 * This is too slow...
2353 	 */
2354 	dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
2355 	if (dl == NULL) {
2356 		ann_data_stat.no_insn++;
2357 		return NULL;
2358 	}
2359 
2360 retry:
2361 	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
2362 	if (istat == NULL) {
2363 		ann_data_stat.no_insn++;
2364 		return NULL;
2365 	}
2366 
2367 	if (annotate_get_insn_location(arch, dl, &loc) < 0) {
2368 		ann_data_stat.no_insn_ops++;
2369 		istat->bad++;
2370 		return NULL;
2371 	}
2372 
2373 	if (is_stack_operation(arch, dl)) {
2374 		istat->good++;
2375 		he->mem_type_off = 0;
2376 		return &stackop_type;
2377 	}
2378 
2379 	for_each_insn_op_loc(&loc, i, op_loc) {
2380 		struct data_loc_info dloc = {
2381 			.arch = arch,
2382 			.thread = he->thread,
2383 			.ms = ms,
2384 			/* Recalculate IP for LOCK prefix or insn fusion */
2385 			.ip = ms->sym->start + dl->al.offset,
2386 			.cpumode = he->cpumode,
2387 			.op = op_loc,
2388 		};
2389 
2390 		if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
2391 			continue;
2392 
2393 		/* Recalculate IP because of LOCK prefix or insn fusion */
2394 		ip = ms->sym->start + dl->al.offset;
2395 
2396 		/* PC-relative addressing */
2397 		if (op_loc->reg1 == DWARF_REG_PC) {
2398 			dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
2399 							    op_loc->offset, dl);
2400 		}
2401 
2402 		/* A per-CPU access in the kernel - pretend it's PC-relative addressing */
2403 		if (map__dso(ms->map)->kernel && arch__is(arch, "x86") &&
2404 		    op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
2405 			dloc.var_addr = op_loc->offset;
2406 			op_loc->reg1 = DWARF_REG_PC;
2407 		}
2408 
2409 		mem_type = find_data_type(&dloc);
2410 
2411 		if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
2412 			istat->good++;
2413 			he->mem_type_off = 0;
2414 			return &canary_type;
2415 		}
2416 
2417 		if (mem_type)
2418 			istat->good++;
2419 		else
2420 			istat->bad++;
2421 
2422 		if (symbol_conf.annotate_data_sample) {
2423 			annotated_data_type__update_samples(mem_type, evsel,
2424 							    dloc.type_offset,
2425 							    he->stat.nr_events,
2426 							    he->stat.period);
2427 		}
2428 		he->mem_type_off = dloc.type_offset;
2429 		return mem_type;
2430 	}
2431 
2432 	/*
2433 	 * Some instructions can be fused and the actual memory access came
2434 	 * from the previous instruction.
2435 	 */
2436 	if (dl->al.offset > 0) {
2437 		struct annotation *notes;
2438 		struct disasm_line *prev_dl;
2439 
2440 		notes = symbol__annotation(ms->sym);
2441 		prev_dl = annotation__prev_asm_line(notes, dl);
2442 
2443 		if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
2444 			dl = prev_dl;
2445 			goto retry;
2446 		}
2447 	}
2448 
2449 	ann_data_stat.no_mem_ops++;
2450 	istat->bad++;
2451 	return NULL;
2452 }
2453 
2454 /* Basic block traversal (BFS) data structure */
2455 struct basic_block_data {
2456 	struct list_head queue;
2457 	struct list_head visited;
2458 };
2459 
2460 /*
2461  * During the traversal, it needs to know the parent block from which the
2462  * current block was reached.  Note that a single basic block can be the
2463  * parent of two child basic blocks (in case of a conditional jump).
2464  */
2465 struct basic_block_link {
2466 	struct list_head node;
2467 	struct basic_block_link *parent;
2468 	struct annotated_basic_block *bb;
2469 };
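
/*
 * For example (illustrative): a conditional jump at the end of a block
 * queues two links - one for the jump target and one for the fall-through
 * line - both pointing back to the same parent link.
 */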
2470 
2471 /* Check whether any basic block in the list already contains the offset */
2472 static bool basic_block_has_offset(struct list_head *head, s64 offset)
2473 {
2474 	struct basic_block_link *link;
2475 
2476 	list_for_each_entry(link, head, node) {
2477 		s64 begin_offset = link->bb->begin->al.offset;
2478 		s64 end_offset = link->bb->end->al.offset;
2479 
2480 		if (begin_offset <= offset && offset <= end_offset)
2481 			return true;
2482 	}
2483 	return false;
2484 }
2485 
2486 static bool is_new_basic_block(struct basic_block_data *bb_data,
2487 			       struct disasm_line *dl)
2488 {
2489 	s64 offset = dl->al.offset;
2490 
2491 	if (basic_block_has_offset(&bb_data->visited, offset))
2492 		return false;
2493 	if (basic_block_has_offset(&bb_data->queue, offset))
2494 		return false;
2495 	return true;
2496 }
2497 
2498 /* Add a basic block starting from dl and link it to the parent */
2499 static int add_basic_block(struct basic_block_data *bb_data,
2500 			   struct basic_block_link *parent,
2501 			   struct disasm_line *dl)
2502 {
2503 	struct annotated_basic_block *bb;
2504 	struct basic_block_link *link;
2505 
2506 	if (dl == NULL)
2507 		return -1;
2508 
2509 	if (!is_new_basic_block(bb_data, dl))
2510 		return 0;
2511 
2512 	bb = zalloc(sizeof(*bb));
2513 	if (bb == NULL)
2514 		return -1;
2515 
2516 	bb->begin = dl;
2517 	bb->end = dl;
2518 	INIT_LIST_HEAD(&bb->list);
2519 
2520 	link = malloc(sizeof(*link));
2521 	if (link == NULL) {
2522 		free(bb);
2523 		return -1;
2524 	}
2525 
2526 	link->bb = bb;
2527 	link->parent = parent;
2528 	list_add_tail(&link->node, &bb_data->queue);
2529 	return 0;
2530 }
2531 
2532 /* Returns true when it finds the target in the current basic block */
2533 static bool process_basic_block(struct basic_block_data *bb_data,
2534 				struct basic_block_link *link,
2535 				struct symbol *sym, u64 target)
2536 {
2537 	struct disasm_line *dl, *next_dl, *last_dl;
2538 	struct annotation *notes = symbol__annotation(sym);
2539 	bool found = false;
2540 
2541 	dl = link->bb->begin;
2542 	/* Check if it's already visited */
2543 	if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
2544 		return false;
2545 
2546 	last_dl = list_last_entry(&notes->src->source,
2547 				  struct disasm_line, al.node);
2548 	if (last_dl->al.offset == -1)
2549 		last_dl = annotation__prev_asm_line(notes, last_dl);
2550 
2551 	if (last_dl == NULL)
2552 		return false;
2553 
2554 	list_for_each_entry_from(dl, &notes->src->source, al.node) {
2555 		/* Skip comment or debug info line */
2556 		if (dl->al.offset == -1)
2557 			continue;
2558 		/* Found the target instruction */
2559 		if (sym->start + dl->al.offset == target) {
2560 			found = true;
2561 			break;
2562 		}
2563 		/* End of the function, finish the block */
2564 		if (dl == last_dl)
2565 			break;
2566 		/* 'return' instruction finishes the block */
2567 		if (ins__is_ret(&dl->ins))
2568 			break;
2569 		/* normal instructions are part of the basic block */
2570 		if (!ins__is_jump(&dl->ins))
2571 			continue;
2572 		/* jump to a different function, tail call or return */
2573 		if (dl->ops.target.outside)
2574 			break;
2575 		/* jump instruction creates new basic block(s) */
2576 		next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
2577 					   /*allow_update=*/false);
2578 		if (next_dl)
2579 			add_basic_block(bb_data, link, next_dl);
2580 
2581 		/*
2582 		 * FIXME: determine conditional jumps properly.
2583 		 * Conditional jumps create another basic block with the
2584 		 * next disasm line.
2585 		 */
2586 		if (!strstr(dl->ins.name, "jmp")) {
2587 			next_dl = annotation__next_asm_line(notes, dl);
2588 			if (next_dl)
2589 				add_basic_block(bb_data, link, next_dl);
2590 		}
2591 		break;
2593 	}
2594 	link->bb->end = dl;
2595 	return found;
2596 }
2597 
2598 /*
2599  * Once the target basic block is found, build a proper linked list of basic
2600  * blocks by following the parent links recursively.
2601  */
2602 static void link_found_basic_blocks(struct basic_block_link *link,
2603 				    struct list_head *head)
2604 {
2605 	while (link) {
2606 		struct basic_block_link *parent = link->parent;
2607 
2608 		list_move(&link->bb->list, head);
2609 		list_del(&link->node);
2610 		free(link);
2611 
2612 		link = parent;
2613 	}
2614 }
2615 
2616 static void delete_basic_blocks(struct basic_block_data *bb_data)
2617 {
2618 	struct basic_block_link *link, *tmp;
2619 
2620 	list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
2621 		list_del(&link->node);
2622 		free(link->bb);
2623 		free(link);
2624 	}
2625 
2626 	list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
2627 		list_del(&link->node);
2628 		free(link->bb);
2629 		free(link);
2630 	}
2631 }
2632 
2633 /**
2634  * annotate_get_basic_blocks - Get basic blocks for given address range
2635  * @sym: symbol to annotate
2636  * @src: source address
2637  * @dst: destination address
2638  * @head: list head to save basic blocks
2639  *
2640  * This function traverses disasm_lines from @src to @dst and saves them in a
2641  * list of annotated_basic_block at @head.  It uses BFS to find the shortest
2642  * path between the two.  The basic_block_link maintains parent links so
2643  * that the list of blocks can be built from the start.
2644  */
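/*
 * A minimal usage sketch (illustrative, not taken from an actual caller):
 *
 *	LIST_HEAD(bb_list);
 *	struct annotated_basic_block *bb;
 *
 *	if (annotate_get_basic_blocks(sym, src, dst, &bb_list) == 0) {
 *		list_for_each_entry(bb, &bb_list, list)
 *			pr_debug("BB: %ld-%ld\n", (long)bb->begin->al.offset,
 *				 (long)bb->end->al.offset);
 *	}
 */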
2645 int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
2646 			      struct list_head *head)
2647 {
2648 	struct basic_block_data bb_data = {
2649 		.queue = LIST_HEAD_INIT(bb_data.queue),
2650 		.visited = LIST_HEAD_INIT(bb_data.visited),
2651 	};
2652 	struct basic_block_link *link;
2653 	struct disasm_line *dl;
2654 	int ret = -1;
2655 
2656 	dl = find_disasm_line(sym, src, /*allow_update=*/false);
2657 	if (dl == NULL)
2658 		return -1;
2659 
2660 	if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0)
2661 		return -1;
2662 
2663 	/* Find shortest path from src to dst using BFS */
2664 	while (!list_empty(&bb_data.queue)) {
2665 		link = list_first_entry(&bb_data.queue, struct basic_block_link, node);
2666 
2667 		if (process_basic_block(&bb_data, link, sym, dst)) {
2668 			link_found_basic_blocks(link, head);
2669 			ret = 0;
2670 			break;
2671 		}
2672 		list_move(&link->node, &bb_data.visited);
2673 	}
2674 	delete_basic_blocks(&bb_data);
2675 	return ret;
2676 }
2677