xref: /linux/tools/perf/util/symbol.c (revision 8520a98dbab61e9e340cdfb72dd17ccc8a98961e)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <stdio.h>
6 #include <string.h>
7 #include <linux/capability.h>
8 #include <linux/kernel.h>
9 #include <linux/mman.h>
10 #include <linux/string.h>
11 #include <linux/time64.h>
12 #include <sys/types.h>
13 #include <sys/stat.h>
14 #include <sys/param.h>
15 #include <fcntl.h>
16 #include <unistd.h>
17 #include <inttypes.h>
18 #include "annotate.h"
19 #include "build-id.h"
20 #include "cap.h"
21 #include "util.h"
22 #include "debug.h"
23 #include "event.h"
24 #include "machine.h"
25 #include "map.h"
26 #include "symbol.h"
27 #include "strlist.h"
28 #include "intlist.h"
29 #include "namespaces.h"
30 #include "header.h"
31 #include "path.h"
32 #include <linux/ctype.h>
33 #include <linux/zalloc.h>
34 
35 #include <elf.h>
36 #include <limits.h>
37 #include <symbol/kallsyms.h>
38 #include <sys/utsname.h>
39 
40 static int dso__load_kernel_sym(struct dso *dso, struct map *map);
41 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
42 static bool symbol__is_idle(const char *name);
43 
44 int vmlinux_path__nr_entries;
45 char **vmlinux_path;
46 
47 struct symbol_conf symbol_conf = {
48 	.nanosecs		= false,
49 	.use_modules		= true,
50 	.try_vmlinux_path	= true,
51 	.demangle		= true,
52 	.demangle_kernel	= false,
53 	.cumulate_callchain	= true,
54 	.time_quantum		= 100 * NSEC_PER_MSEC, /* 100ms */
55 	.show_hist_headers	= true,
56 	.symfs			= "",
57 	.event_group		= true,
58 	.inline_name		= true,
59 	.res_sample		= 0,
60 };
61 
62 static enum dso_binary_type binary_type_symtab[] = {
63 	DSO_BINARY_TYPE__KALLSYMS,
64 	DSO_BINARY_TYPE__GUEST_KALLSYMS,
65 	DSO_BINARY_TYPE__JAVA_JIT,
66 	DSO_BINARY_TYPE__DEBUGLINK,
67 	DSO_BINARY_TYPE__BUILD_ID_CACHE,
68 	DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
69 	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
70 	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
71 	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
72 	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
73 	DSO_BINARY_TYPE__GUEST_KMODULE,
74 	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
75 	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
76 	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
77 	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
78 	DSO_BINARY_TYPE__NOT_FOUND,
79 };
80 
81 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
82 
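/*
 * Keep only nm(1)-style text ('T'/'W') and data ('D'/'B') symbols; the
 * case is folded first so local (lowercase) variants pass too.
 */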
83 static bool symbol_type__filter(char symbol_type)
84 {
85 	symbol_type = toupper(symbol_type);
86 	return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
87 }
88 
89 static int prefix_underscores_count(const char *str)
90 {
91 	const char *tail = str;
92 
93 	while (*tail == '_')
94 		tail++;
95 
96 	return tail - str;
97 }
98 
99 void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
100 {
101 	p->end = c->start;
102 }
103 
104 const char * __weak arch__normalize_symbol_name(const char *name)
105 {
106 	return name;
107 }
108 
109 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
110 {
111 	return strcmp(namea, nameb);
112 }
113 
114 int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
115 					unsigned int n)
116 {
117 	return strncmp(namea, nameb, n);
118 }
119 
120 int __weak arch__choose_best_symbol(struct symbol *syma,
121 				    struct symbol *symb __maybe_unused)
122 {
123 	/* Avoid "SyS" kernel syscall aliases */
124 	if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
125 		return SYMBOL_B;
126 	if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
127 		return SYMBOL_B;
128 
129 	return SYMBOL_A;
130 }
131 
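/*
 * Tie-break ladder for two symbols at the same address (e.g. kallsyms
 * aliases): prefer the one with a non-zero size, then non-weak binding,
 * then global binding, then fewer leading underscores, then the longer
 * name, before deferring to the arch-specific choice above.
 */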
132 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
133 {
134 	s64 a;
135 	s64 b;
136 	size_t na, nb;
137 
138 	/* Prefer a symbol with non-zero length */
139 	a = syma->end - syma->start;
140 	b = symb->end - symb->start;
141 	if ((b == 0) && (a > 0))
142 		return SYMBOL_A;
143 	else if ((a == 0) && (b > 0))
144 		return SYMBOL_B;
145 
146 	/* Prefer a non-weak symbol over a weak one */
147 	a = syma->binding == STB_WEAK;
148 	b = symb->binding == STB_WEAK;
149 	if (b && !a)
150 		return SYMBOL_A;
151 	if (a && !b)
152 		return SYMBOL_B;
153 
154 	/* Prefer a global symbol over a non-global one */
155 	a = syma->binding == STB_GLOBAL;
156 	b = symb->binding == STB_GLOBAL;
157 	if (a && !b)
158 		return SYMBOL_A;
159 	if (b && !a)
160 		return SYMBOL_B;
161 
162 	/* Prefer a symbol with fewer leading underscores */
163 	a = prefix_underscores_count(syma->name);
164 	b = prefix_underscores_count(symb->name);
165 	if (b > a)
166 		return SYMBOL_A;
167 	else if (a > b)
168 		return SYMBOL_B;
169 
170 	/* Choose the symbol with the longest name */
171 	na = strlen(syma->name);
172 	nb = strlen(symb->name);
173 	if (na > nb)
174 		return SYMBOL_A;
175 	else if (na < nb)
176 		return SYMBOL_B;
177 
178 	return arch__choose_best_symbol(syma, symb);
179 }
180 
181 void symbols__fixup_duplicate(struct rb_root_cached *symbols)
182 {
183 	struct rb_node *nd;
184 	struct symbol *curr, *next;
185 
186 	if (symbol_conf.allow_aliases)
187 		return;
188 
189 	nd = rb_first_cached(symbols);
190 
191 	while (nd) {
192 		curr = rb_entry(nd, struct symbol, rb_node);
193 again:
194 		nd = rb_next(&curr->rb_node);
195 		if (!nd)
196 			break;
197 
198 		next = rb_entry(nd, struct symbol, rb_node);
199 
200 		if (curr->start != next->start)
201 			continue;
202 
203 		if (choose_best_symbol(curr, next) == SYMBOL_A) {
204 			rb_erase_cached(&next->rb_node, symbols);
205 			symbol__delete(next);
206 			goto again;
207 		} else {
208 			nd = rb_next(&curr->rb_node);
209 			rb_erase_cached(&curr->rb_node, symbols);
210 			symbol__delete(curr);
211 		}
212 	}
213 }
214 
215 void symbols__fixup_end(struct rb_root_cached *symbols)
216 {
217 	struct rb_node *nd, *prevnd = rb_first_cached(symbols);
218 	struct symbol *curr, *prev;
219 
220 	if (prevnd == NULL)
221 		return;
222 
223 	curr = rb_entry(prevnd, struct symbol, rb_node);
224 
225 	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
226 		prev = curr;
227 		curr = rb_entry(nd, struct symbol, rb_node);
228 
229 		if (prev->end == prev->start && prev->end != curr->start)
230 			arch__symbols__fixup_end(prev, curr);
231 	}
232 
233 	/* Last entry */
234 	if (curr->end == curr->start)
235 		curr->end = roundup(curr->start, 4096) + 4096;
236 }
237 
238 void map_groups__fixup_end(struct map_groups *mg)
239 {
240 	struct maps *maps = &mg->maps;
241 	struct map *next, *curr;
242 
243 	down_write(&maps->lock);
244 
245 	curr = maps__first(maps);
246 	if (curr == NULL)
247 		goto out_unlock;
248 
249 	for (next = map__next(curr); next; next = map__next(curr)) {
250 		if (!curr->end)
251 			curr->end = next->start;
252 		curr = next;
253 	}
254 
255 	/*
256 	 * We still don't have the actual symbols, so guess the
257 	 * last map's final address.
258 	 */
259 	if (!curr->end)
260 		curr->end = ~0ULL;
261 
262 out_unlock:
263 	up_write(&maps->lock);
264 }
265 
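/*
 * When symbol_conf.priv_size is set, the allocation looks like:
 *
 *   [ private area (e.g. struct annotation) ][ struct symbol ][ name\0 ]
 *
 * and the returned pointer is advanced past the private area, which is
 * why symbol__delete() has to subtract priv_size before freeing.
 */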
266 struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
267 {
268 	size_t namelen = strlen(name) + 1;
269 	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
270 					sizeof(*sym) + namelen));
271 	if (sym == NULL)
272 		return NULL;
273 
274 	if (symbol_conf.priv_size) {
275 		if (symbol_conf.init_annotation) {
276 			struct annotation *notes = (void *)sym;
277 			pthread_mutex_init(&notes->lock, NULL);
278 		}
279 		sym = ((void *)sym) + symbol_conf.priv_size;
280 	}
281 
282 	sym->start   = start;
283 	sym->end     = len ? start + len : start;
284 	sym->type    = type;
285 	sym->binding = binding;
286 	sym->namelen = namelen - 1;
287 
288 	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
289 		  __func__, name, start, sym->end);
290 	memcpy(sym->name, name, namelen);
291 
292 	return sym;
293 }
294 
295 void symbol__delete(struct symbol *sym)
296 {
297 	free(((void *)sym) - symbol_conf.priv_size);
298 }
299 
300 void symbols__delete(struct rb_root_cached *symbols)
301 {
302 	struct symbol *pos;
303 	struct rb_node *next = rb_first_cached(symbols);
304 
305 	while (next) {
306 		pos = rb_entry(next, struct symbol, rb_node);
307 		next = rb_next(&pos->rb_node);
308 		rb_erase_cached(&pos->rb_node, symbols);
309 		symbol__delete(pos);
310 	}
311 }
312 
313 void __symbols__insert(struct rb_root_cached *symbols,
314 		       struct symbol *sym, bool kernel)
315 {
316 	struct rb_node **p = &symbols->rb_root.rb_node;
317 	struct rb_node *parent = NULL;
318 	const u64 ip = sym->start;
319 	struct symbol *s;
320 	bool leftmost = true;
321 
322 	if (kernel) {
323 		const char *name = sym->name;
324 		/*
325 		 * ppc64 uses function descriptors and prepends a '.' to the
326 		 * name of every code symbol. Remove it.
327 		 */
328 		if (name[0] == '.')
329 			name++;
330 		sym->idle = symbol__is_idle(name);
331 	}
332 
333 	while (*p != NULL) {
334 		parent = *p;
335 		s = rb_entry(parent, struct symbol, rb_node);
336 		if (ip < s->start)
337 			p = &(*p)->rb_left;
338 		else {
339 			p = &(*p)->rb_right;
340 			leftmost = false;
341 		}
342 	}
343 	rb_link_node(&sym->rb_node, parent, p);
344 	rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
345 }
346 
347 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
348 {
349 	__symbols__insert(symbols, sym, false);
350 }
351 
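/*
 * Address lookup: a hit means s->start <= ip < s->end, with the end
 * address exclusive, except for zero-sized symbols where
 * ip == start == end still matches.
 */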
352 static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
353 {
354 	struct rb_node *n;
355 
356 	if (symbols == NULL)
357 		return NULL;
358 
359 	n = symbols->rb_root.rb_node;
360 
361 	while (n) {
362 		struct symbol *s = rb_entry(n, struct symbol, rb_node);
363 
364 		if (ip < s->start)
365 			n = n->rb_left;
366 		else if (ip > s->end || (ip == s->end && ip != s->start))
367 			n = n->rb_right;
368 		else
369 			return s;
370 	}
371 
372 	return NULL;
373 }
374 
375 static struct symbol *symbols__first(struct rb_root_cached *symbols)
376 {
377 	struct rb_node *n = rb_first_cached(symbols);
378 
379 	if (n)
380 		return rb_entry(n, struct symbol, rb_node);
381 
382 	return NULL;
383 }
384 
385 static struct symbol *symbols__last(struct rb_root_cached *symbols)
386 {
387 	struct rb_node *n = rb_last(&symbols->rb_root);
388 
389 	if (n)
390 		return rb_entry(n, struct symbol, rb_node);
391 
392 	return NULL;
393 }
394 
395 static struct symbol *symbols__next(struct symbol *sym)
396 {
397 	struct rb_node *n = rb_next(&sym->rb_node);
398 
399 	if (n)
400 		return rb_entry(n, struct symbol, rb_node);
401 
402 	return NULL;
403 }
404 
405 static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
406 {
407 	struct rb_node **p = &symbols->rb_root.rb_node;
408 	struct rb_node *parent = NULL;
409 	struct symbol_name_rb_node *symn, *s;
410 	bool leftmost = true;
411 
412 	symn = container_of(sym, struct symbol_name_rb_node, sym);
413 
414 	while (*p != NULL) {
415 		parent = *p;
416 		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
417 		if (strcmp(sym->name, s->sym.name) < 0)
418 			p = &(*p)->rb_left;
419 		else {
420 			p = &(*p)->rb_right;
421 			leftmost = false;
422 		}
423 	}
424 	rb_link_node(&symn->rb_node, parent, p);
425 	rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
426 }
427 
428 static void symbols__sort_by_name(struct rb_root_cached *symbols,
429 				  struct rb_root_cached *source)
430 {
431 	struct rb_node *nd;
432 
433 	for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
434 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
435 		symbols__insert_by_name(symbols, pos);
436 	}
437 }
438 
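/*
 * Versioned ELF symbols carry an "@@" default-version tag, e.g.
 * "memcpy@@GLIBC_2.14"; with SYMBOL_TAG_INCLUDE__DEFAULT_ONLY the
 * comparison stops before the tag, so plain "memcpy" still matches.
 */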
439 int symbol__match_symbol_name(const char *name, const char *str,
440 			      enum symbol_tag_include includes)
441 {
442 	const char *versioning;
443 
444 	if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
445 	    (versioning = strstr(name, "@@"))) {
446 		int len = strlen(str);
447 
448 		if (len < versioning - name)
449 			len = versioning - name;
450 
451 		return arch__compare_symbol_names_n(name, str, len);
452 	} else
453 		return arch__compare_symbol_names(name, str);
454 }
455 
456 static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
457 					    const char *name,
458 					    enum symbol_tag_include includes)
459 {
460 	struct rb_node *n;
461 	struct symbol_name_rb_node *s = NULL;
462 
463 	if (symbols == NULL)
464 		return NULL;
465 
466 	n = symbols->rb_root.rb_node;
467 
468 	while (n) {
469 		int cmp;
470 
471 		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
472 		cmp = symbol__match_symbol_name(s->sym.name, name, includes);
473 
474 		if (cmp > 0)
475 			n = n->rb_left;
476 		else if (cmp < 0)
477 			n = n->rb_right;
478 		else
479 			break;
480 	}
481 
482 	if (n == NULL)
483 		return NULL;
484 
485 	if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
486 		/* return first symbol that has same name (if any) */
487 		for (n = rb_prev(n); n; n = rb_prev(n)) {
488 			struct symbol_name_rb_node *tmp;
489 
490 			tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
491 			if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
492 				break;
493 
494 			s = tmp;
495 		}
496 
497 	return &s->sym;
498 }
499 
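/*
 * dso->last_find_result is a one-entry cache: dso__find_symbol() only
 * walks the rb-tree when it is asked about a different address than the
 * previous call.
 */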
500 void dso__reset_find_symbol_cache(struct dso *dso)
501 {
502 	dso->last_find_result.addr   = 0;
503 	dso->last_find_result.symbol = NULL;
504 }
505 
506 void dso__insert_symbol(struct dso *dso, struct symbol *sym)
507 {
508 	__symbols__insert(&dso->symbols, sym, dso->kernel);
509 
510 	/* update the symbol cache if necessary */
511 	if (dso->last_find_result.addr >= sym->start &&
512 	    (dso->last_find_result.addr < sym->end ||
513 	    sym->start == sym->end)) {
514 		dso->last_find_result.symbol = sym;
515 	}
516 }
517 
518 struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
519 {
520 	if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
521 		dso->last_find_result.addr   = addr;
522 		dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
523 	}
524 
525 	return dso->last_find_result.symbol;
526 }
527 
528 struct symbol *dso__first_symbol(struct dso *dso)
529 {
530 	return symbols__first(&dso->symbols);
531 }
532 
533 struct symbol *dso__last_symbol(struct dso *dso)
534 {
535 	return symbols__last(&dso->symbols);
536 }
537 
538 struct symbol *dso__next_symbol(struct symbol *sym)
539 {
540 	return symbols__next(sym);
541 }
542 
543 struct symbol *symbol__next_by_name(struct symbol *sym)
544 {
545 	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
546 	struct rb_node *n = rb_next(&s->rb_node);
547 
548 	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
549 }
550 
551 /*
552  * Returns the first symbol that matches @name.
553  */
554 struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
555 {
556 	struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
557 						 SYMBOL_TAG_INCLUDE__NONE);
558 	if (!s)
559 		s = symbols__find_by_name(&dso->symbol_names, name,
560 					  SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
561 	return s;
562 }
563 
564 void dso__sort_by_name(struct dso *dso)
565 {
566 	dso__set_sorted_by_name(dso);
567 	symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
568 }
569 
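/*
 * Parses /proc/modules-style text, where each line looks like, e.g.:
 *
 *   ext4 765952 3 mbcache,jbd2, Live 0xffffffffc05e6000
 *
 * i.e. "name size refcount deps state address".  The loop below takes
 * the load address from the trailing "0x..." field, the size from the
 * second field, and reports the module name bracketed as "[ext4]".
 */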
570 int modules__parse(const char *filename, void *arg,
571 		   int (*process_module)(void *arg, const char *name,
572 					 u64 start, u64 size))
573 {
574 	char *line = NULL;
575 	size_t n;
576 	FILE *file;
577 	int err = 0;
578 
579 	file = fopen(filename, "r");
580 	if (file == NULL)
581 		return -1;
582 
583 	while (1) {
584 		char name[PATH_MAX];
585 		u64 start, size;
586 		char *sep, *endptr;
587 		ssize_t line_len;
588 
589 		line_len = getline(&line, &n, file);
590 		if (line_len < 0) {
591 			if (feof(file))
592 				break;
593 			err = -1;
594 			goto out;
595 		}
596 
597 		if (!line) {
598 			err = -1;
599 			goto out;
600 		}
601 
602 		line[--line_len] = '\0'; /* \n */
603 
604 		sep = strrchr(line, 'x');
605 		if (sep == NULL)
606 			continue;
607 
608 		hex2u64(sep + 1, &start);
609 
610 		sep = strchr(line, ' ');
611 		if (sep == NULL)
612 			continue;
613 
614 		*sep = '\0';
615 
616 		scnprintf(name, sizeof(name), "[%s]", line);
617 
618 		size = strtoul(sep + 1, &endptr, 0);
619 		if (*endptr != ' ' && *endptr != '\t')
620 			continue;
621 
622 		err = process_module(arg, name, start, size);
623 		if (err)
624 			break;
625 	}
626 out:
627 	free(line);
628 	fclose(file);
629 	return err;
630 }
631 
632 /*
633  * These are symbols in the kernel image, so make sure that
634  * sym is from a kernel DSO.
635  */
636 static bool symbol__is_idle(const char *name)
637 {
638 	const char * const idle_symbols[] = {
639 		"arch_cpu_idle",
640 		"cpu_idle",
641 		"cpu_startup_entry",
642 		"intel_idle",
643 		"default_idle",
644 		"native_safe_halt",
645 		"enter_idle",
646 		"exit_idle",
647 		"mwait_idle",
648 		"mwait_idle_with_hints",
649 		"poll_idle",
650 		"ppc64_runlatch_off",
651 		"pseries_dedicated_idle_sleep",
652 		NULL
653 	};
654 	int i;
655 
656 	for (i = 0; idle_symbols[i]; i++) {
657 		if (!strcmp(idle_symbols[i], name))
658 			return true;
659 	}
660 
661 	return false;
662 }
663 
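/*
 * kallsyms lines are "<addr> <type> <name>[\t[module]]", e.g.:
 *
 *   ffffffff81000000 T _text
 *   ffffffffc0002000 t ext4_sync_fs	[ext4]
 *
 * Module symbols keep the "\t[module]" suffix in sym->name, which is
 * what the strchr(name, '[') test below uses to flag symbols belonging
 * to the kernel proper.
 */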
664 static int map__process_kallsym_symbol(void *arg, const char *name,
665 				       char type, u64 start)
666 {
667 	struct symbol *sym;
668 	struct dso *dso = arg;
669 	struct rb_root_cached *root = &dso->symbols;
670 
671 	if (!symbol_type__filter(type))
672 		return 0;
673 
674 	/*
675 	 * module symbols are not sorted so we add all
676 	 * symbols, setting length to 0, and rely on
677 	 * symbols__fixup_end() to fix it up.
678 	 */
679 	sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
680 	if (sym == NULL)
681 		return -ENOMEM;
682 	/*
683 	 * We will pass the symbols to the filter later, in
684 	 * map__split_kallsyms, when we have split the maps per module
685 	 */
686 	__symbols__insert(root, sym, !strchr(name, '['));
687 
688 	return 0;
689 }
690 
691 /*
692  * Loads the function entries in /proc/kallsyms into kernel_map->dso,
693  * so that we can in the next step set the symbol ->end address and then
694  * call kernel_maps__split_kallsyms.
695  */
696 static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
697 {
698 	return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
699 }
700 
701 static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
702 {
703 	struct map *curr_map;
704 	struct symbol *pos;
705 	int count = 0;
706 	struct rb_root_cached old_root = dso->symbols;
707 	struct rb_root_cached *root = &dso->symbols;
708 	struct rb_node *next = rb_first_cached(root);
709 
710 	if (!kmaps)
711 		return -1;
712 
713 	*root = RB_ROOT_CACHED;
714 
715 	while (next) {
716 		char *module;
717 
718 		pos = rb_entry(next, struct symbol, rb_node);
719 		next = rb_next(&pos->rb_node);
720 
721 		rb_erase_cached(&pos->rb_node, &old_root);
722 		RB_CLEAR_NODE(&pos->rb_node);
723 		module = strchr(pos->name, '\t');
724 		if (module)
725 			*module = '\0';
726 
727 		curr_map = map_groups__find(kmaps, pos->start);
728 
729 		if (!curr_map) {
730 			symbol__delete(pos);
731 			continue;
732 		}
733 
734 		pos->start -= curr_map->start - curr_map->pgoff;
735 		if (pos->end > curr_map->end)
736 			pos->end = curr_map->end;
737 		if (pos->end)
738 			pos->end -= curr_map->start - curr_map->pgoff;
739 		symbols__insert(&curr_map->dso->symbols, pos);
740 		++count;
741 	}
742 
743 	/* Symbols have been adjusted */
744 	dso->adjust_symbols = 1;
745 
746 	return count;
747 }
748 
749 /*
750  * Split the symbols into maps, making sure there are no overlaps, i.e. the
751  * kernel range is broken into several maps, named [kernel].N, as we don't
752  * have the original ELF section names that vmlinux has.
753  */
754 static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
755 				      struct map *initial_map)
756 {
757 	struct machine *machine;
758 	struct map *curr_map = initial_map;
759 	struct symbol *pos;
760 	int count = 0, moved = 0;
761 	struct rb_root_cached *root = &dso->symbols;
762 	struct rb_node *next = rb_first_cached(root);
763 	int kernel_range = 0;
764 	bool x86_64;
765 
766 	if (!kmaps)
767 		return -1;
768 
769 	machine = kmaps->machine;
770 
771 	x86_64 = machine__is(machine, "x86_64");
772 
773 	while (next) {
774 		char *module;
775 
776 		pos = rb_entry(next, struct symbol, rb_node);
777 		next = rb_next(&pos->rb_node);
778 
779 		module = strchr(pos->name, '\t');
780 		if (module) {
781 			if (!symbol_conf.use_modules)
782 				goto discard_symbol;
783 
784 			*module++ = '\0';
785 
786 			if (strcmp(curr_map->dso->short_name, module)) {
787 				if (curr_map != initial_map &&
788 				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
789 				    machine__is_default_guest(machine)) {
790 					/*
791 					 * We assume all symbols of a module are
792 					 * contiguous in kallsyms, so curr_map
793 					 * points to a module and all its
794 					 * symbols are in its kmap. Mark it as
795 					 * loaded.
796 					 */
797 					dso__set_loaded(curr_map->dso);
798 				}
799 
800 				curr_map = map_groups__find_by_name(kmaps, module);
801 				if (curr_map == NULL) {
802 					pr_debug("%s/proc/{kallsyms,modules} "
803 					         "inconsistency while looking "
804 						 "for \"%s\" module!\n",
805 						 machine->root_dir, module);
806 					curr_map = initial_map;
807 					goto discard_symbol;
808 				}
809 
810 				if (curr_map->dso->loaded &&
811 				    !machine__is_default_guest(machine))
812 					goto discard_symbol;
813 			}
814 			/*
815 			 * So that we look just like we get from .ko files,
816 			 * i.e. not prelinked, relative to initial_map->start.
817 			 */
818 			pos->start = curr_map->map_ip(curr_map, pos->start);
819 			pos->end   = curr_map->map_ip(curr_map, pos->end);
820 		} else if (x86_64 && is_entry_trampoline(pos->name)) {
821 			/*
822 			 * These symbols are not needed anymore since the
823 			 * trampoline maps refer to the text section and its
824 			 * symbols instead. Avoid having to deal with
825 			 * relocations, and the assumption that the first symbol
826 			 * is the start of kernel text, by simply removing the
827 			 * symbols at this point.
828 			 */
829 			goto discard_symbol;
830 		} else if (curr_map != initial_map) {
831 			char dso_name[PATH_MAX];
832 			struct dso *ndso;
833 
834 			if (delta) {
835 				/* Kernel was relocated at boot time */
836 				pos->start -= delta;
837 				pos->end -= delta;
838 			}
839 
840 			if (count == 0) {
841 				curr_map = initial_map;
842 				goto add_symbol;
843 			}
844 
845 			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
846 				snprintf(dso_name, sizeof(dso_name),
847 					"[guest.kernel].%d",
848 					kernel_range++);
849 			else
850 				snprintf(dso_name, sizeof(dso_name),
851 					"[kernel].%d",
852 					kernel_range++);
853 
854 			ndso = dso__new(dso_name);
855 			if (ndso == NULL)
856 				return -1;
857 
858 			ndso->kernel = dso->kernel;
859 
860 			curr_map = map__new2(pos->start, ndso);
861 			if (curr_map == NULL) {
862 				dso__put(ndso);
863 				return -1;
864 			}
865 
866 			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
867 			map_groups__insert(kmaps, curr_map);
868 			++kernel_range;
869 		} else if (delta) {
870 			/* Kernel was relocated at boot time */
871 			pos->start -= delta;
872 			pos->end -= delta;
873 		}
874 add_symbol:
875 		if (curr_map != initial_map) {
876 			rb_erase_cached(&pos->rb_node, root);
877 			symbols__insert(&curr_map->dso->symbols, pos);
878 			++moved;
879 		} else
880 			++count;
881 
882 		continue;
883 discard_symbol:
884 		rb_erase_cached(&pos->rb_node, root);
885 		symbol__delete(pos);
886 	}
887 
888 	if (curr_map != initial_map &&
889 	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
890 	    machine__is_default_guest(kmaps->machine)) {
891 		dso__set_loaded(curr_map->dso);
892 	}
893 
894 	return count + moved;
895 }
896 
897 bool symbol__restricted_filename(const char *filename,
898 				 const char *restricted_filename)
899 {
900 	bool restricted = false;
901 
902 	if (symbol_conf.kptr_restrict) {
903 		char *r = realpath(filename, NULL);
904 
905 		if (r != NULL) {
906 			restricted = strcmp(r, restricted_filename) == 0;
907 			free(r);
908 			return restricted;
909 		}
910 	}
911 
912 	return restricted;
913 }
914 
915 struct module_info {
916 	struct rb_node rb_node;
917 	char *name;
918 	u64 start;
919 };
920 
921 static void add_module(struct module_info *mi, struct rb_root *modules)
922 {
923 	struct rb_node **p = &modules->rb_node;
924 	struct rb_node *parent = NULL;
925 	struct module_info *m;
926 
927 	while (*p != NULL) {
928 		parent = *p;
929 		m = rb_entry(parent, struct module_info, rb_node);
930 		if (strcmp(mi->name, m->name) < 0)
931 			p = &(*p)->rb_left;
932 		else
933 			p = &(*p)->rb_right;
934 	}
935 	rb_link_node(&mi->rb_node, parent, p);
936 	rb_insert_color(&mi->rb_node, modules);
937 }
938 
939 static void delete_modules(struct rb_root *modules)
940 {
941 	struct module_info *mi;
942 	struct rb_node *next = rb_first(modules);
943 
944 	while (next) {
945 		mi = rb_entry(next, struct module_info, rb_node);
946 		next = rb_next(&mi->rb_node);
947 		rb_erase(&mi->rb_node, modules);
948 		zfree(&mi->name);
949 		free(mi);
950 	}
951 }
952 
953 static struct module_info *find_module(const char *name,
954 				       struct rb_root *modules)
955 {
956 	struct rb_node *n = modules->rb_node;
957 
958 	while (n) {
959 		struct module_info *m;
960 		int cmp;
961 
962 		m = rb_entry(n, struct module_info, rb_node);
963 		cmp = strcmp(name, m->name);
964 		if (cmp < 0)
965 			n = n->rb_left;
966 		else if (cmp > 0)
967 			n = n->rb_right;
968 		else
969 			return m;
970 	}
971 
972 	return NULL;
973 }
974 
975 static int __read_proc_modules(void *arg, const char *name, u64 start,
976 			       u64 size __maybe_unused)
977 {
978 	struct rb_root *modules = arg;
979 	struct module_info *mi;
980 
981 	mi = zalloc(sizeof(struct module_info));
982 	if (!mi)
983 		return -ENOMEM;
984 
985 	mi->name = strdup(name);
986 	mi->start = start;
987 
988 	if (!mi->name) {
989 		free(mi);
990 		return -ENOMEM;
991 	}
992 
993 	add_module(mi, modules);
994 
995 	return 0;
996 }
997 
998 static int read_proc_modules(const char *filename, struct rb_root *modules)
999 {
1000 	if (symbol__restricted_filename(filename, "/proc/modules"))
1001 		return -1;
1002 
1003 	if (modules__parse(filename, modules, __read_proc_modules)) {
1004 		delete_modules(modules);
1005 		return -1;
1006 	}
1007 
1008 	return 0;
1009 }
1010 
1011 int compare_proc_modules(const char *from, const char *to)
1012 {
1013 	struct rb_root from_modules = RB_ROOT;
1014 	struct rb_root to_modules = RB_ROOT;
1015 	struct rb_node *from_node, *to_node;
1016 	struct module_info *from_m, *to_m;
1017 	int ret = -1;
1018 
1019 	if (read_proc_modules(from, &from_modules))
1020 		return -1;
1021 
1022 	if (read_proc_modules(to, &to_modules))
1023 		goto out_delete_from;
1024 
1025 	from_node = rb_first(&from_modules);
1026 	to_node = rb_first(&to_modules);
1027 	while (from_node) {
1028 		if (!to_node)
1029 			break;
1030 
1031 		from_m = rb_entry(from_node, struct module_info, rb_node);
1032 		to_m = rb_entry(to_node, struct module_info, rb_node);
1033 
1034 		if (from_m->start != to_m->start ||
1035 		    strcmp(from_m->name, to_m->name))
1036 			break;
1037 
1038 		from_node = rb_next(from_node);
1039 		to_node = rb_next(to_node);
1040 	}
1041 
1042 	if (!from_node && !to_node)
1043 		ret = 0;
1044 
1045 	delete_modules(&to_modules);
1046 out_delete_from:
1047 	delete_modules(&from_modules);
1048 
1049 	return ret;
1050 }
1051 
1052 struct map *map_groups__first(struct map_groups *mg)
1053 {
1054 	return maps__first(&mg->maps);
1055 }
1056 
1057 static int do_validate_kcore_modules(const char *filename,
1058 				  struct map_groups *kmaps)
1059 {
1060 	struct rb_root modules = RB_ROOT;
1061 	struct map *old_map;
1062 	int err;
1063 
1064 	err = read_proc_modules(filename, &modules);
1065 	if (err)
1066 		return err;
1067 
1068 	old_map = map_groups__first(kmaps);
1069 	while (old_map) {
1070 		struct map *next = map_groups__next(old_map);
1071 		struct module_info *mi;
1072 
1073 		if (!__map__is_kmodule(old_map)) {
1074 			old_map = next;
1075 			continue;
1076 		}
1077 
1078 		/* Module must be in memory at the same address */
1079 		mi = find_module(old_map->dso->short_name, &modules);
1080 		if (!mi || mi->start != old_map->start) {
1081 			err = -EINVAL;
1082 			goto out;
1083 		}
1084 
1085 		old_map = next;
1086 	}
1087 out:
1088 	delete_modules(&modules);
1089 	return err;
1090 }
1091 
1092 /*
1093  * If kallsyms is referenced by name then we look for filename in the same
1094  * directory.
1095  */
1096 static bool filename_from_kallsyms_filename(char *filename,
1097 					    const char *base_name,
1098 					    const char *kallsyms_filename)
1099 {
1100 	char *name;
1101 
1102 	strcpy(filename, kallsyms_filename);
1103 	name = strrchr(filename, '/');
1104 	if (!name)
1105 		return false;
1106 
1107 	name += 1;
1108 
1109 	if (!strcmp(name, "kallsyms")) {
1110 		strcpy(name, base_name);
1111 		return true;
1112 	}
1113 
1114 	return false;
1115 }
1116 
1117 static int validate_kcore_modules(const char *kallsyms_filename,
1118 				  struct map *map)
1119 {
1120 	struct map_groups *kmaps = map__kmaps(map);
1121 	char modules_filename[PATH_MAX];
1122 
1123 	if (!kmaps)
1124 		return -EINVAL;
1125 
1126 	if (!filename_from_kallsyms_filename(modules_filename, "modules",
1127 					     kallsyms_filename))
1128 		return -EINVAL;
1129 
1130 	if (do_validate_kcore_modules(modules_filename, kmaps))
1131 		return -EINVAL;
1132 
1133 	return 0;
1134 }
1135 
1136 static int validate_kcore_addresses(const char *kallsyms_filename,
1137 				    struct map *map)
1138 {
1139 	struct kmap *kmap = map__kmap(map);
1140 
1141 	if (!kmap)
1142 		return -EINVAL;
1143 
1144 	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1145 		u64 start;
1146 
1147 		if (kallsyms__get_function_start(kallsyms_filename,
1148 						 kmap->ref_reloc_sym->name, &start))
1149 			return -ENOENT;
1150 		if (start != kmap->ref_reloc_sym->addr)
1151 			return -EINVAL;
1152 	}
1153 
1154 	return validate_kcore_modules(kallsyms_filename, map);
1155 }
1156 
1157 struct kcore_mapfn_data {
1158 	struct dso *dso;
1159 	struct list_head maps;
1160 };
1161 
1162 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1163 {
1164 	struct kcore_mapfn_data *md = data;
1165 	struct map *map;
1166 
1167 	map = map__new2(start, md->dso);
1168 	if (map == NULL)
1169 		return -ENOMEM;
1170 
1171 	map->end = map->start + len;
1172 	map->pgoff = pgoff;
1173 
1174 	list_add(&map->node, &md->maps);
1175 
1176 	return 0;
1177 }
1178 
1179 /*
1180  * Merges map into map_groups by splitting the new map
1181  * within the existing map regions.
1182  */
1183 int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
1184 {
1185 	struct map *old_map;
1186 	LIST_HEAD(merged);
1187 
1188 	for (old_map = map_groups__first(kmaps); old_map;
1189 	     old_map = map_groups__next(old_map)) {
1190 
1191 		/* no overlap with this one */
1192 		if (new_map->end < old_map->start ||
1193 		    new_map->start >= old_map->end)
1194 			continue;
1195 
1196 		if (new_map->start < old_map->start) {
1197 			/*
1198 			 * |new......
1199 			 *       |old....
1200 			 */
1201 			if (new_map->end < old_map->end) {
1202 				/*
1203 				 * |new......|     -> |new..|
1204 				 *       |old....| ->       |old....|
1205 				 */
1206 				new_map->end = old_map->start;
1207 			} else {
1208 				/*
1209 				 * |new.............| -> |new..|       |new..|
1210 				 *       |old....|    ->       |old....|
1211 				 */
1212 				struct map *m = map__clone(new_map);
1213 
1214 				if (!m)
1215 					return -ENOMEM;
1216 
1217 				m->end = old_map->start;
1218 				list_add_tail(&m->node, &merged);
1219 				new_map->start = old_map->end;
1220 			}
1221 		} else {
1222 			/*
1223 			 *      |new......
1224 			 * |old....
1225 			 */
1226 			if (new_map->end < old_map->end) {
1227 				/*
1228 				 *      |new..|   -> x
1229 				 * |old.........| -> |old.........|
1230 				 */
1231 				map__put(new_map);
1232 				new_map = NULL;
1233 				break;
1234 			} else {
1235 				/*
1236 				 *      |new......| ->         |new...|
1237 				 * |old....|        -> |old....|
1238 				 */
1239 				new_map->start = old_map->end;
1240 			}
1241 		}
1242 	}
1243 
1244 	while (!list_empty(&merged)) {
1245 		old_map = list_entry(merged.next, struct map, node);
1246 		list_del_init(&old_map->node);
1247 		map_groups__insert(kmaps, old_map);
1248 		map__put(old_map);
1249 	}
1250 
1251 	if (new_map) {
1252 		map_groups__insert(kmaps, new_map);
1253 		map__put(new_map);
1254 	}
1255 	return 0;
1256 }
1257 
1258 static int dso__load_kcore(struct dso *dso, struct map *map,
1259 			   const char *kallsyms_filename)
1260 {
1261 	struct map_groups *kmaps = map__kmaps(map);
1262 	struct kcore_mapfn_data md;
1263 	struct map *old_map, *new_map, *replacement_map = NULL;
1264 	struct machine *machine;
1265 	bool is_64_bit;
1266 	int err, fd;
1267 	char kcore_filename[PATH_MAX];
1268 	u64 stext;
1269 
1270 	if (!kmaps)
1271 		return -EINVAL;
1272 
1273 	machine = kmaps->machine;
1274 
1275 	/* This function requires that the map is the kernel map */
1276 	if (!__map__is_kernel(map))
1277 		return -EINVAL;
1278 
1279 	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1280 					     kallsyms_filename))
1281 		return -EINVAL;
1282 
1283 	/* Modules and kernel must be present at their original addresses */
1284 	if (validate_kcore_addresses(kallsyms_filename, map))
1285 		return -EINVAL;
1286 
1287 	md.dso = dso;
1288 	INIT_LIST_HEAD(&md.maps);
1289 
1290 	fd = open(kcore_filename, O_RDONLY);
1291 	if (fd < 0) {
1292 		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1293 			 kcore_filename);
1294 		return -EINVAL;
1295 	}
1296 
1297 	/* Read new maps into temporary lists */
1298 	err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
1299 			      &is_64_bit);
1300 	if (err)
1301 		goto out_err;
1302 	dso->is_64_bit = is_64_bit;
1303 
1304 	if (list_empty(&md.maps)) {
1305 		err = -EINVAL;
1306 		goto out_err;
1307 	}
1308 
1309 	/* Remove old maps */
1310 	old_map = map_groups__first(kmaps);
1311 	while (old_map) {
1312 		struct map *next = map_groups__next(old_map);
1313 
1314 		/*
1315 		 * We need to preserve eBPF maps even if they are
1316 		 * covered by kcore, because we need to access
1317 		 * eBPF dso for source data.
1318 		 */
1319 		if (old_map != map && !__map__is_bpf_prog(old_map))
1320 			map_groups__remove(kmaps, old_map);
1321 		old_map = next;
1322 	}
1323 	machine->trampolines_mapped = false;
1324 
1325 	/* Find the kernel map using the '_stext' symbol */
1326 	if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
1327 		list_for_each_entry(new_map, &md.maps, node) {
1328 			if (stext >= new_map->start && stext < new_map->end) {
1329 				replacement_map = new_map;
1330 				break;
1331 			}
1332 		}
1333 	}
1334 
1335 	if (!replacement_map)
1336 		replacement_map = list_entry(md.maps.next, struct map, node);
1337 
1338 	/* Add new maps */
1339 	while (!list_empty(&md.maps)) {
1340 		new_map = list_entry(md.maps.next, struct map, node);
1341 		list_del_init(&new_map->node);
1342 		if (new_map == replacement_map) {
1343 			map->start	= new_map->start;
1344 			map->end	= new_map->end;
1345 			map->pgoff	= new_map->pgoff;
1346 			map->map_ip	= new_map->map_ip;
1347 			map->unmap_ip	= new_map->unmap_ip;
1348 			/* Ensure maps are correctly ordered */
1349 			map__get(map);
1350 			map_groups__remove(kmaps, map);
1351 			map_groups__insert(kmaps, map);
1352 			map__put(map);
1353 			map__put(new_map);
1354 		} else {
1355 			/*
1356 			 * Merge kcore map into existing maps,
1357 			 * and ensure that current maps (eBPF)
1358 			 * stay intact.
1359 			 */
1360 			if (map_groups__merge_in(kmaps, new_map))
1361 				goto out_err;
1362 		}
1363 	}
1364 
1365 	if (machine__is(machine, "x86_64")) {
1366 		u64 addr;
1367 
1368 		/*
1369 		 * If one of the corresponding symbols is there, assume the
1370 		 * entry trampoline maps are too.
1371 		 */
1372 		if (!kallsyms__get_function_start(kallsyms_filename,
1373 						  ENTRY_TRAMPOLINE_NAME,
1374 						  &addr))
1375 			machine->trampolines_mapped = true;
1376 	}
1377 
1378 	/*
1379 	 * Set the data type and long name so that kcore can be read via
1380 	 * dso__data_read_addr().
1381 	 */
1382 	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1383 		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1384 	else
1385 		dso->binary_type = DSO_BINARY_TYPE__KCORE;
1386 	dso__set_long_name(dso, strdup(kcore_filename), true);
1387 
1388 	close(fd);
1389 
1390 	if (map->prot & PROT_EXEC)
1391 		pr_debug("Using %s for kernel object code\n", kcore_filename);
1392 	else
1393 		pr_debug("Using %s for kernel data\n", kcore_filename);
1394 
1395 	return 0;
1396 
1397 out_err:
1398 	while (!list_empty(&md.maps)) {
1399 		map = list_entry(md.maps.next, struct map, node);
1400 		list_del_init(&map->node);
1401 		map__put(map);
1402 	}
1403 	close(fd);
1404 	return -EINVAL;
1405 }
1406 
1407 /*
1408  * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
1409  * delta based on the relocation reference symbol.
1410  */
1411 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1412 {
1413 	u64 addr;
1414 
1415 	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1416 		return 0;
1417 
1418 	if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1419 		return -1;
1420 
1421 	*delta = addr - kmap->ref_reloc_sym->addr;
1422 	return 0;
1423 }
1424 
1425 int __dso__load_kallsyms(struct dso *dso, const char *filename,
1426 			 struct map *map, bool no_kcore)
1427 {
1428 	struct kmap *kmap = map__kmap(map);
1429 	u64 delta = 0;
1430 
1431 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1432 		return -1;
1433 
1434 	if (!kmap || !kmap->kmaps)
1435 		return -1;
1436 
1437 	if (dso__load_all_kallsyms(dso, filename) < 0)
1438 		return -1;
1439 
1440 	if (kallsyms__delta(kmap, filename, &delta))
1441 		return -1;
1442 
1443 	symbols__fixup_end(&dso->symbols);
1444 	symbols__fixup_duplicate(&dso->symbols);
1445 
1446 	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1447 		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1448 	else
1449 		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1450 
1451 	if (!no_kcore && !dso__load_kcore(dso, map, filename))
1452 		return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
1453 	else
1454 		return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
1455 }
1456 
1457 int dso__load_kallsyms(struct dso *dso, const char *filename,
1458 		       struct map *map)
1459 {
1460 	return __dso__load_kallsyms(dso, filename, map, false);
1461 }
1462 
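/*
 * perf map files (/tmp/perf-<pid>.map) are plain text with one symbol
 * per line, "<start> <size> <name>" with the first two fields in hex,
 * e.g.:
 *
 *   7f6bf0a2b000 180 java.lang.String::hashCode
 */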
1463 static int dso__load_perf_map(const char *map_path, struct dso *dso)
1464 {
1465 	char *line = NULL;
1466 	size_t n;
1467 	FILE *file;
1468 	int nr_syms = 0;
1469 
1470 	file = fopen(map_path, "r");
1471 	if (file == NULL)
1472 		goto out_failure;
1473 
1474 	while (!feof(file)) {
1475 		u64 start, size;
1476 		struct symbol *sym;
1477 		int line_len, len;
1478 
1479 		line_len = getline(&line, &n, file);
1480 		if (line_len < 0)
1481 			break;
1482 
1483 		if (!line)
1484 			goto out_failure;
1485 
1486 		line[--line_len] = '\0'; /* \n */
1487 
1488 		len = hex2u64(line, &start);
1489 
1490 		len++;
1491 		if (len + 2 >= line_len)
1492 			continue;
1493 
1494 		len += hex2u64(line + len, &size);
1495 
1496 		len++;
1497 		if (len + 2 >= line_len)
1498 			continue;
1499 
1500 		sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1501 
1502 		if (sym == NULL)
1503 			goto out_delete_line;
1504 
1505 		symbols__insert(&dso->symbols, sym);
1506 		nr_syms++;
1507 	}
1508 
1509 	free(line);
1510 	fclose(file);
1511 
1512 	return nr_syms;
1513 
1514 out_delete_line:
1515 	free(line);
1516 out_failure:
1517 	return -1;
1518 }
1519 
1520 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1521 					   enum dso_binary_type type)
1522 {
1523 	switch (type) {
1524 	case DSO_BINARY_TYPE__JAVA_JIT:
1525 	case DSO_BINARY_TYPE__DEBUGLINK:
1526 	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1527 	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1528 	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1529 	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1530 	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1531 		return !kmod && dso->kernel == DSO_TYPE_USER;
1532 
1533 	case DSO_BINARY_TYPE__KALLSYMS:
1534 	case DSO_BINARY_TYPE__VMLINUX:
1535 	case DSO_BINARY_TYPE__KCORE:
1536 		return dso->kernel == DSO_TYPE_KERNEL;
1537 
1538 	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1539 	case DSO_BINARY_TYPE__GUEST_VMLINUX:
1540 	case DSO_BINARY_TYPE__GUEST_KCORE:
1541 		return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1542 
1543 	case DSO_BINARY_TYPE__GUEST_KMODULE:
1544 	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1545 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1546 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1547 		/*
1548 		 * kernel modules know their symtab type - it's set when
1549 		 * creating a module dso in machine__findnew_module_map().
1550 		 */
1551 		return kmod && dso->symtab_type == type;
1552 
1553 	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1554 	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1555 		return true;
1556 
1557 	case DSO_BINARY_TYPE__BPF_PROG_INFO:
1558 	case DSO_BINARY_TYPE__NOT_FOUND:
1559 	default:
1560 		return false;
1561 	}
1562 }
1563 
1564 /* Checks for the existence of the perf-<pid>.map file in two different
1565  * locations.  First, if the process is in a separate mount namespace, check
1566  * in that namespace using the pid of the innermost pid namespace.  If it's
1567  * not in a namespace, or the file can't be found there, try in the mount namespace of
1568  * the tracing process using our view of its pid.
1569  */
1570 static int dso__find_perf_map(char *filebuf, size_t bufsz,
1571 			      struct nsinfo **nsip)
1572 {
1573 	struct nscookie nsc;
1574 	struct nsinfo *nsi;
1575 	struct nsinfo *nnsi;
1576 	int rc = -1;
1577 
1578 	nsi = *nsip;
1579 
1580 	if (nsi->need_setns) {
1581 		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
1582 		nsinfo__mountns_enter(nsi, &nsc);
1583 		rc = access(filebuf, R_OK);
1584 		nsinfo__mountns_exit(&nsc);
1585 		if (rc == 0)
1586 			return rc;
1587 	}
1588 
1589 	nnsi = nsinfo__copy(nsi);
1590 	if (nnsi) {
1591 		nsinfo__put(nsi);
1592 
1593 		nnsi->need_setns = false;
1594 		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
1595 		*nsip = nnsi;
1596 		rc = 0;
1597 	}
1598 
1599 	return rc;
1600 }
1601 
1602 int dso__load(struct dso *dso, struct map *map)
1603 {
1604 	char *name;
1605 	int ret = -1;
1606 	u_int i;
1607 	struct machine *machine;
1608 	char *root_dir = (char *) "";
1609 	int ss_pos = 0;
1610 	struct symsrc ss_[2];
1611 	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1612 	bool kmod;
1613 	bool perfmap;
1614 	unsigned char build_id[BUILD_ID_SIZE];
1615 	struct nscookie nsc;
1616 	char newmapname[PATH_MAX];
1617 	const char *map_path = dso->long_name;
1618 
1619 	perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
1620 	if (perfmap) {
1621 		if (dso->nsinfo && (dso__find_perf_map(newmapname,
1622 		    sizeof(newmapname), &dso->nsinfo) == 0)) {
1623 			map_path = newmapname;
1624 		}
1625 	}
1626 
1627 	nsinfo__mountns_enter(dso->nsinfo, &nsc);
1628 	pthread_mutex_lock(&dso->lock);
1629 
1630 	/* check again under the dso->lock */
1631 	if (dso__loaded(dso)) {
1632 		ret = 1;
1633 		goto out;
1634 	}
1635 
1636 	if (map->groups && map->groups->machine)
1637 		machine = map->groups->machine;
1638 	else
1639 		machine = NULL;
1640 
1641 	if (dso->kernel) {
1642 		if (dso->kernel == DSO_TYPE_KERNEL)
1643 			ret = dso__load_kernel_sym(dso, map);
1644 		else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1645 			ret = dso__load_guest_kernel_sym(dso, map);
1646 
1647 		if (machine__is(machine, "x86_64"))
1648 			machine__map_x86_64_entry_trampolines(machine, dso);
1649 		goto out;
1650 	}
1651 
1652 	dso->adjust_symbols = 0;
1653 
1654 	if (perfmap) {
1655 		ret = dso__load_perf_map(map_path, dso);
1656 		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1657 					     DSO_BINARY_TYPE__NOT_FOUND;
1658 		goto out;
1659 	}
1660 
1661 	if (machine)
1662 		root_dir = machine->root_dir;
1663 
1664 	name = malloc(PATH_MAX);
1665 	if (!name)
1666 		goto out;
1667 
1668 	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1669 		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1670 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1671 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1672 
1674 	/*
1675 	 * Read the build id if possible. This is required for
1676 	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
1677 	 */
1678 	if (!dso->has_build_id &&
1679 	    is_regular_file(dso->long_name)) {
1680 		__symbol__join_symfs(name, PATH_MAX, dso->long_name);
1681 		if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
1682 			dso__set_build_id(dso, build_id);
1683 	}
1684 
1685 	/*
1686 	 * Iterate over candidate debug images.
1687 	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
1688 	 * and/or opd section) for processing.
1689 	 */
1690 	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1691 		struct symsrc *ss = &ss_[ss_pos];
1692 		bool next_slot = false;
1693 		bool is_reg;
1694 		bool nsexit;
1695 		int sirc = -1;
1696 
1697 		enum dso_binary_type symtab_type = binary_type_symtab[i];
1698 
1699 		nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
1700 		    symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);
1701 
1702 		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1703 			continue;
1704 
1705 		if (dso__read_binary_type_filename(dso, symtab_type,
1706 						   root_dir, name, PATH_MAX))
1707 			continue;
1708 
1709 		if (nsexit)
1710 			nsinfo__mountns_exit(&nsc);
1711 
1712 		is_reg = is_regular_file(name);
1713 		if (is_reg)
1714 			sirc = symsrc__init(ss, dso, name, symtab_type);
1715 
1716 		if (nsexit)
1717 			nsinfo__mountns_enter(dso->nsinfo, &nsc);
1718 
1719 		if (!is_reg || sirc < 0)
1720 			continue;
1721 
1722 		if (!syms_ss && symsrc__has_symtab(ss)) {
1723 			syms_ss = ss;
1724 			next_slot = true;
1725 			if (!dso->symsrc_filename)
1726 				dso->symsrc_filename = strdup(name);
1727 		}
1728 
1729 		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1730 			runtime_ss = ss;
1731 			next_slot = true;
1732 		}
1733 
1734 		if (next_slot) {
1735 			ss_pos++;
1736 
1737 			if (syms_ss && runtime_ss)
1738 				break;
1739 		} else {
1740 			symsrc__destroy(ss);
1741 		}
1742 
1743 	}
1744 
1745 	if (!runtime_ss && !syms_ss)
1746 		goto out_free;
1747 
1748 	if (runtime_ss && !syms_ss)
1749 		syms_ss = runtime_ss;
1751 
1752 	/* We'll have to hope for the best */
1753 	if (!runtime_ss && syms_ss)
1754 		runtime_ss = syms_ss;
1755 
1756 	if (syms_ss)
1757 		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1758 	else
1759 		ret = -1;
1760 
1761 	if (ret > 0) {
1762 		int nr_plt;
1763 
1764 		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
1765 		if (nr_plt > 0)
1766 			ret += nr_plt;
1767 	}
1768 
1769 	for (; ss_pos > 0; ss_pos--)
1770 		symsrc__destroy(&ss_[ss_pos - 1]);
1771 out_free:
1772 	free(name);
1773 	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1774 		ret = 0;
1775 out:
1776 	dso__set_loaded(dso);
1777 	pthread_mutex_unlock(&dso->lock);
1778 	nsinfo__mountns_exit(&nsc);
1779 
1780 	return ret;
1781 }
1782 
1783 struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
1784 {
1785 	struct maps *maps = &mg->maps;
1786 	struct map *map;
1787 	struct rb_node *node;
1788 
1789 	down_read(&maps->lock);
1790 
1791 	for (node = maps->names.rb_node; node; ) {
1792 		int rc;
1793 
1794 		map = rb_entry(node, struct map, rb_node_name);
1795 
1796 		rc = strcmp(map->dso->short_name, name);
1797 		if (rc < 0)
1798 			node = node->rb_left;
1799 		else if (rc > 0)
1800 			node = node->rb_right;
1801 		else
1803 			goto out_unlock;
1804 	}
1805 
1806 	map = NULL;
1807 
1808 out_unlock:
1809 	up_read(&maps->lock);
1810 	return map;
1811 }
1812 
1813 int dso__load_vmlinux(struct dso *dso, struct map *map,
1814 		      const char *vmlinux, bool vmlinux_allocated)
1815 {
1816 	int err = -1;
1817 	struct symsrc ss;
1818 	char symfs_vmlinux[PATH_MAX];
1819 	enum dso_binary_type symtab_type;
1820 
1821 	if (vmlinux[0] == '/')
1822 		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1823 	else
1824 		symbol__join_symfs(symfs_vmlinux, vmlinux);
1825 
1826 	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1827 		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1828 	else
1829 		symtab_type = DSO_BINARY_TYPE__VMLINUX;
1830 
1831 	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1832 		return -1;
1833 
1834 	err = dso__load_sym(dso, map, &ss, &ss, 0);
1835 	symsrc__destroy(&ss);
1836 
1837 	if (err > 0) {
1838 		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1839 			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1840 		else
1841 			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1842 		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1843 		dso__set_loaded(dso);
1844 		pr_debug("Using %s for symbols\n", symfs_vmlinux);
1845 	}
1846 
1847 	return err;
1848 }
1849 
1850 int dso__load_vmlinux_path(struct dso *dso, struct map *map)
1851 {
1852 	int i, err = 0;
1853 	char *filename = NULL;
1854 
1855 	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1856 		 vmlinux_path__nr_entries + 1);
1857 
1858 	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1859 		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
1860 		if (err > 0)
1861 			goto out;
1862 	}
1863 
1864 	if (!symbol_conf.ignore_vmlinux_buildid)
1865 		filename = dso__build_id_filename(dso, NULL, 0, false);
1866 	if (filename != NULL) {
1867 		err = dso__load_vmlinux(dso, map, filename, true);
1868 		if (err > 0)
1869 			goto out;
1870 		free(filename);
1871 	}
1872 out:
1873 	return err;
1874 }
1875 
1876 static bool visible_dir_filter(const char *name, struct dirent *d)
1877 {
1878 	if (d->d_type != DT_DIR)
1879 		return false;
1880 	return lsdir_no_dot_filter(name, d);
1881 }
1882 
1883 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1884 {
1885 	char kallsyms_filename[PATH_MAX];
1886 	int ret = -1;
1887 	struct strlist *dirs;
1888 	struct str_node *nd;
1889 
1890 	dirs = lsdir(dir, visible_dir_filter);
1891 	if (!dirs)
1892 		return -1;
1893 
1894 	strlist__for_each_entry(nd, dirs) {
1895 		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1896 			  "%s/%s/kallsyms", dir, nd->s);
1897 		if (!validate_kcore_addresses(kallsyms_filename, map)) {
1898 			strlcpy(dir, kallsyms_filename, dir_sz);
1899 			ret = 0;
1900 			break;
1901 		}
1902 	}
1903 
1904 	strlist__delete(dirs);
1905 
1906 	return ret;
1907 }
1908 
1909 /*
1910  * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
1911  * since access(R_OK) only checks with the real UID/GID but open() uses the effective
1912  * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
1913  */
1914 static bool filename__readable(const char *file)
1915 {
1916 	int fd = open(file, O_RDONLY);
1917 	if (fd < 0)
1918 		return false;
1919 	close(fd);
1920 	return true;
1921 }
1922 
1923 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1924 {
1925 	u8 host_build_id[BUILD_ID_SIZE];
1926 	char sbuild_id[SBUILD_ID_SIZE];
1927 	bool is_host = false;
1928 	char path[PATH_MAX];
1929 
1930 	if (!dso->has_build_id) {
1931 		/*
1932 		 * Last resort, if we don't have a build-id and couldn't find
1933 		 * any vmlinux file, try the running kernel kallsyms table.
1934 		 */
1935 		goto proc_kallsyms;
1936 	}
1937 
1938 	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1939 				 sizeof(host_build_id)) == 0)
1940 		is_host = dso__build_id_equal(dso, host_build_id);
1941 
1942 	/* Try a fast path for /proc/kallsyms if possible */
1943 	if (is_host) {
1944 		/*
1945 		 * Do not check the build-id cache, unless we know we cannot use
1946 		 * /proc/kcore or module maps don't match to /proc/kallsyms.
1947 		 * use /proc/kcore or the module maps don't match /proc/kallsyms.
1948 		 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
1949 		 * can't check it.
1950 		 */
1951 		if (filename__readable("/proc/kcore") &&
1952 		    !validate_kcore_addresses("/proc/kallsyms", map))
1953 			goto proc_kallsyms;
1954 	}
1955 
1956 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1957 
1958 	/* Find kallsyms in build-id cache with kcore */
1959 	scnprintf(path, sizeof(path), "%s/%s/%s",
1960 		  buildid_dir, DSO__NAME_KCORE, sbuild_id);
1961 
1962 	if (!find_matching_kcore(map, path, sizeof(path)))
1963 		return strdup(path);
1964 
1965 	/* Use current /proc/kallsyms if possible */
1966 	if (is_host) {
1967 proc_kallsyms:
1968 		return strdup("/proc/kallsyms");
1969 	}
1970 
1971 	/* Finally, find a cache of kallsyms */
1972 	if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
1973 		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
1974 		       sbuild_id);
1975 		return NULL;
1976 	}
1977 
1978 	return strdup(path);
1979 }
1980 
1981 static int dso__load_kernel_sym(struct dso *dso, struct map *map)
1982 {
1983 	int err;
1984 	const char *kallsyms_filename = NULL;
1985 	char *kallsyms_allocated_filename = NULL;
1986 	/*
1987 	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
1988 	 * it and only it, reporting errors to the user if it cannot be used.
1989 	 *
1990 	 * For instance, try to analyse an ARM perf.data file _without_ a
1991 	 * build-id, or if the user specifies the wrong path to the right
1992 	 * vmlinux file, obviously we can't fallback to another vmlinux (a
1993 	 * x86_64 one, on the machine where analysis is being performed, say),
1994 	 * or worse, /proc/kallsyms.
1995 	 *
1996 	 * If the specified file _has_ a build-id and there is a build-id
1997 	 * section in the perf.data file, we will still do the expected
1998 	 * validation in dso__load_vmlinux and will bail out if they don't
1999 	 * match.
2000 	 */
2001 	if (symbol_conf.kallsyms_name != NULL) {
2002 		kallsyms_filename = symbol_conf.kallsyms_name;
2003 		goto do_kallsyms;
2004 	}
2005 
2006 	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
2007 		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
2008 	}
2009 
2010 	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
2011 		err = dso__load_vmlinux_path(dso, map);
2012 		if (err > 0)
2013 			return err;
2014 	}
2015 
2016 	/* do not try local files if a symfs was given */
2017 	if (symbol_conf.symfs[0] != 0)
2018 		return -1;
2019 
2020 	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
2021 	if (!kallsyms_allocated_filename)
2022 		return -1;
2023 
2024 	kallsyms_filename = kallsyms_allocated_filename;
2025 
2026 do_kallsyms:
2027 	err = dso__load_kallsyms(dso, kallsyms_filename, map);
2028 	if (err > 0)
2029 		pr_debug("Using %s for symbols\n", kallsyms_filename);
2030 	free(kallsyms_allocated_filename);
2031 
2032 	if (err > 0 && !dso__is_kcore(dso)) {
2033 		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
2034 		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
2035 		map__fixup_start(map);
2036 		map__fixup_end(map);
2037 	}
2038 
2039 	return err;
2040 }
2041 
2042 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
2043 {
2044 	int err;
2045 	const char *kallsyms_filename = NULL;
2046 	struct machine *machine;
2047 	char path[PATH_MAX];
2048 
2049 	if (!map->groups) {
2050 		pr_debug("Guest kernel map has no groups pointer\n");
2051 		return -1;
2052 	}
2053 	machine = map->groups->machine;
2054 
2055 	if (machine__is_default_guest(machine)) {
2056 		/*
2057 		 * If the user specified a vmlinux filename, use it and only
2058 		 * it, reporting errors to the user if it cannot be used.
2059 		 * Otherwise use the guest kallsyms file passed on the command line.
2060 		 */
2061 		if (symbol_conf.default_guest_vmlinux_name != NULL) {
2062 			err = dso__load_vmlinux(dso, map,
2063 						symbol_conf.default_guest_vmlinux_name,
2064 						false);
2065 			return err;
2066 		}
2067 
2068 		kallsyms_filename = symbol_conf.default_guest_kallsyms;
2069 		if (!kallsyms_filename)
2070 			return -1;
2071 	} else {
2072 		snprintf(path, sizeof(path), "%s/proc/kallsyms", machine->root_dir);
2073 		kallsyms_filename = path;
2074 	}
2075 
2076 	err = dso__load_kallsyms(dso, kallsyms_filename, map);
2077 	if (err > 0)
2078 		pr_debug("Using %s for symbols\n", kallsyms_filename);
2079 	if (err > 0 && !dso__is_kcore(dso)) {
2080 		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
2081 		dso__set_long_name(dso, machine->mmap_name, false);
2082 		map__fixup_start(map);
2083 		map__fixup_end(map);
2084 	}
2085 
2086 	return err;
2087 }
2088 
2089 static void vmlinux_path__exit(void)
2090 {
2091 	while (--vmlinux_path__nr_entries >= 0)
2092 		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
2093 	vmlinux_path__nr_entries = 0;
2094 
2095 	zfree(&vmlinux_path);
2096 }
2097 
2098 static const char * const vmlinux_paths[] = {
2099 	"vmlinux",
2100 	"/boot/vmlinux"
2101 };
2102 
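/*
 * Templates where "%s" is replaced with the kernel version; e.g. on a
 * "5.4.0" kernel the first entry would expand to "/boot/vmlinux-5.4.0"
 * (version string purely illustrative).
 */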
2103 static const char * const vmlinux_paths_upd[] = {
2104 	"/boot/vmlinux-%s",
2105 	"/usr/lib/debug/boot/vmlinux-%s",
2106 	"/lib/modules/%s/build/vmlinux",
2107 	"/usr/lib/debug/lib/modules/%s/vmlinux",
2108 	"/usr/lib/debug/boot/vmlinux-%s.debug"
2109 };
2110 
2111 static int vmlinux_path__add(const char *new_entry)
2112 {
2113 	vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
2114 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2115 		return -1;
2116 	++vmlinux_path__nr_entries;
2117 
2118 	return 0;
2119 }
2120 
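/*
 * Populate vmlinux_path[] with the fixed entries and then, unless a symfs
 * was given, with the version templates expanded using the kernel version
 * recorded in the perf.data env (if available) or taken from uname().
 */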
2121 static int vmlinux_path__init(struct perf_env *env)
2122 {
2123 	struct utsname uts;
2124 	char bf[PATH_MAX];
2125 	char *kernel_version;
2126 	unsigned int i;
2127 
2128 	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
2129 			      ARRAY_SIZE(vmlinux_paths_upd)));
2130 	if (vmlinux_path == NULL)
2131 		return -1;
2132 
2133 	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
2134 		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
2135 			goto out_fail;
2136 
2137 	/* only try kernel version if no symfs was given */
2138 	if (symbol_conf.symfs[0] != 0)
2139 		return 0;
2140 
2141 	if (env) {
2142 		kernel_version = env->os_release;
2143 	} else {
2144 		if (uname(&uts) < 0)
2145 			goto out_fail;
2146 
2147 		kernel_version = uts.release;
2148 	}
2149 
2150 	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
2151 		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
2152 		if (vmlinux_path__add(bf) < 0)
2153 			goto out_fail;
2154 	}
2155 
2156 	return 0;
2157 
2158 out_fail:
2159 	vmlinux_path__exit();
2160 	return -1;
2161 }
2162 
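/*
 * Turn a user-supplied string into a strlist filter; a NULL string is not
 * an error, it simply leaves the filter unset. setup_intlist() below does
 * the same for numeric (pid/tid) lists.
 */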
2163 int setup_list(struct strlist **list, const char *list_str,
2164 	       const char *list_name)
2165 {
2166 	if (list_str == NULL)
2167 		return 0;
2168 
2169 	*list = strlist__new(list_str, NULL);
2170 	if (!*list) {
2171 		pr_err("problems parsing %s list\n", list_name);
2172 		return -1;
2173 	}
2174 
2175 	symbol_conf.has_filter = true;
2176 	return 0;
2177 }
2178 
2179 int setup_intlist(struct intlist **list, const char *list_str,
2180 		  const char *list_name)
2181 {
2182 	if (list_str == NULL)
2183 		return 0;
2184 
2185 	*list = intlist__new(list_str);
2186 	if (!*list) {
2187 		pr_err("problems parsing %s list\n", list_name);
2188 		return -1;
2189 	}
2190 	return 0;
2191 }
2192 
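/*
 * Are kernel addresses hidden from us? With CAP_SYSLOG, kptr_restrict
 * only matters at 2 or above; without it, any non-zero value does, and
 * a perf_event_paranoid above 1 restricts us as well (see below).
 */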
2193 static bool symbol__read_kptr_restrict(void)
2194 {
2195 	bool value = false;
2196 	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
2197 
2198 	if (fp != NULL) {
2199 		char line[8];
2200 
2201 		if (fgets(line, sizeof(line), fp) != NULL)
2202 			value = perf_cap__capable(CAP_SYSLOG) ?
2203 					(atoi(line) >= 2) :
2204 					(atoi(line) != 0);
2205 
2206 		fclose(fp);
2207 	}
2208 
2209 	/* Per kernel/kallsyms.c: addresses are also restricted when
2210 	 * perf_event_paranoid > 1 and we lack CAP_SYSLOG.
2211 	 */
2212 	if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
2213 		value = true;
2214 
2215 	return value;
2216 }
2217 
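/*
 * Grow the per-symbol private area to hold annotation state. Must run
 * before symbol__init(), since priv_size is consumed once initialization
 * has happened.
 */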
2218 int symbol__annotation_init(void)
2219 {
2220 	if (symbol_conf.init_annotation)
2221 		return 0;
2222 
2223 	if (symbol_conf.initialized) {
2224 		pr_err("Annotation needs to be initialized before symbol__init()\n");
2225 		return -1;
2226 	}
2227 
2228 	symbol_conf.priv_size += sizeof(struct annotation);
2229 	symbol_conf.init_annotation = true;
2230 	return 0;
2231 }
2232 
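/*
 * One-time setup: align priv_size, init the ELF layer, build the vmlinux
 * search path and parse the dso/comm/pid/tid/symbol filter lists.
 * Subsequent calls are no-ops. A typical caller, passing NULL to fall
 * back to uname() for the kernel version, does roughly:
 *
 *	if (symbol__init(NULL) < 0)
 *		return -1;
 *	...
 *	symbol__exit();
 */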
2233 int symbol__init(struct perf_env *env)
2234 {
2235 	const char *symfs;
2236 
2237 	if (symbol_conf.initialized)
2238 		return 0;
2239 
2240 	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
2241 
2242 	symbol__elf_init();
2243 
2244 	if (symbol_conf.sort_by_name)
2245 		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
2246 					  sizeof(struct symbol));
2247 
2248 	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
2249 		return -1;
2250 
2251 	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2252 		pr_err("'.' is the only invalid --field-separator argument\n");
2253 		return -1;
2254 	}
2255 
2256 	if (setup_list(&symbol_conf.dso_list,
2257 		       symbol_conf.dso_list_str, "dso") < 0)
2258 		return -1;
2259 
2260 	if (setup_list(&symbol_conf.comm_list,
2261 		       symbol_conf.comm_list_str, "comm") < 0)
2262 		goto out_free_dso_list;
2263 
2264 	if (setup_intlist(&symbol_conf.pid_list,
2265 		       symbol_conf.pid_list_str, "pid") < 0)
2266 		goto out_free_comm_list;
2267 
2268 	if (setup_intlist(&symbol_conf.tid_list,
2269 		       symbol_conf.tid_list_str, "tid") < 0)
2270 		goto out_free_pid_list;
2271 
2272 	if (setup_list(&symbol_conf.sym_list,
2273 		       symbol_conf.sym_list_str, "symbol") < 0)
2274 		goto out_free_tid_list;
2275 
2276 	if (setup_list(&symbol_conf.bt_stop_list,
2277 		       symbol_conf.bt_stop_list_str, "symbol") < 0)
2278 		goto out_free_sym_list;
2279 
2280 	/*
2281 	 * A symfs path of "/" is identical to "";
2282 	 * reset it here for simplicity.
2283 	 */
2284 	symfs = realpath(symbol_conf.symfs, NULL);
2285 	if (symfs == NULL)
2286 		symfs = symbol_conf.symfs;
2287 	if (strcmp(symfs, "/") == 0)
2288 		symbol_conf.symfs = "";
2289 	if (symfs != symbol_conf.symfs)
2290 		free((void *)symfs);
2291 
2292 	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2293 
2294 	symbol_conf.initialized = true;
2295 	return 0;
2296 
2297 out_free_sym_list:
2298 	strlist__delete(symbol_conf.sym_list);
2299 out_free_tid_list:
2300 	intlist__delete(symbol_conf.tid_list);
2301 out_free_pid_list:
2302 	intlist__delete(symbol_conf.pid_list);
2303 out_free_comm_list:
2304 	strlist__delete(symbol_conf.comm_list);
2305 out_free_dso_list:
2306 	strlist__delete(symbol_conf.dso_list);
2307 	return -1;
2308 }
2309 
2310 void symbol__exit(void)
2311 {
2312 	if (!symbol_conf.initialized)
2313 		return;
2314 	strlist__delete(symbol_conf.bt_stop_list);
2315 	strlist__delete(symbol_conf.sym_list);
2316 	strlist__delete(symbol_conf.dso_list);
2317 	strlist__delete(symbol_conf.comm_list);
2318 	intlist__delete(symbol_conf.tid_list);
2319 	intlist__delete(symbol_conf.pid_list);
2320 	vmlinux_path__exit();
2321 	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2322 	symbol_conf.bt_stop_list = NULL;
2323 	symbol_conf.initialized = false;
2324 }
2325 
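/*
 * Option callback for the symfs setting: remember the directory and
 * redirect the build-id cache to <symfs>/.debug so lookups stay inside
 * the symfs tree.
 */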
2326 int symbol__config_symfs(const struct option *opt __maybe_unused,
2327 			 const char *dir, int unset __maybe_unused)
2328 {
2329 	char *bf = NULL;
2330 	int ret;
2331 
2332 	symbol_conf.symfs = strdup(dir);
2333 	if (symbol_conf.symfs == NULL)
2334 		return -ENOMEM;
2335 
2336 	/* Skip the locally configured cache if a symfs is given, and
2337 	 * point the build-id dir at symfs/.debug instead.
2338 	 */
2339 	ret = asprintf(&bf, "%s/%s", dir, ".debug");
2340 	if (ret < 0)
2341 		return -ENOMEM;
2342 
2343 	set_buildid_dir(bf);
2344 
2345 	free(bf);
2346 	return 0;
2347 }
2348 
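/*
 * Plain reference counting for mem_info (block_info below is identical):
 * __new() starts the count at one, __get() grabs an extra reference,
 * __put() drops one and frees the object when the count hits zero.
 */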
2349 struct mem_info *mem_info__get(struct mem_info *mi)
2350 {
2351 	if (mi)
2352 		refcount_inc(&mi->refcnt);
2353 	return mi;
2354 }
2355 
2356 void mem_info__put(struct mem_info *mi)
2357 {
2358 	if (mi && refcount_dec_and_test(&mi->refcnt))
2359 		free(mi);
2360 }
2361 
2362 struct mem_info *mem_info__new(void)
2363 {
2364 	struct mem_info *mi = zalloc(sizeof(*mi));
2365 
2366 	if (mi)
2367 		refcount_set(&mi->refcnt, 1);
2368 	return mi;
2369 }
2370 
2371 struct block_info *block_info__get(struct block_info *bi)
2372 {
2373 	if (bi)
2374 		refcount_inc(&bi->refcnt);
2375 	return bi;
2376 }
2377 
2378 void block_info__put(struct block_info *bi)
2379 {
2380 	if (bi && refcount_dec_and_test(&bi->refcnt))
2381 		free(bi);
2382 }
2383 
2384 struct block_info *block_info__new(void)
2385 {
2386 	struct block_info *bi = zalloc(sizeof(*bi));
2387 
2388 	if (bi)
2389 		refcount_set(&bi->refcnt, 1);
2390 	return bi;
2391 }
2392