xref: /linux/tools/perf/util/map.c (revision b31e3e3316a78e4a2cf23be8e0d47e5e5a025bde)
1 #include "symbol.h"
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <limits.h>
5 #include <stdlib.h>
6 #include <string.h>
7 #include <stdio.h>
8 #include <unistd.h>
9 #include "map.h"
10 #include "thread.h"
11 #include "strlist.h"
12 #include "vdso.h"
13 #include "build-id.h"
14 #include "util.h"
15 #include "debug.h"
16 #include "machine.h"
17 #include <linux/string.h>
18 #include "unwind.h"
19 
20 static void __maps__insert(struct maps *maps, struct map *map);
21 
22 const char *map_type__name[MAP__NR_TYPES] = {
23 	[MAP__FUNCTION] = "Functions",
24 	[MAP__VARIABLE] = "Variables",
25 };
26 
27 static inline int is_anon_memory(const char *filename)
28 {
29 	return !strcmp(filename, "//anon") ||
30 	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
31 	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
32 }
33 
34 static inline int is_no_dso_memory(const char *filename)
35 {
36 	return !strncmp(filename, "[stack", 6) ||
37 	       !strncmp(filename, "/SYSV", 5)  ||
38 	       !strcmp(filename, "[heap]");
39 }
40 
41 static inline int is_android_lib(const char *filename)
42 {
43 	return !strncmp(filename, "/data/app-lib", 13) ||
44 	       !strncmp(filename, "/system/lib", 11);
45 }
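/*
 * Illustrative inputs (derived from the string checks above, not exhaustive):
 * is_anon_memory() matches "//anon", "/dev/zero*" and "/anon_hugepage*",
 * is_no_dso_memory() matches "[stack*", "/SYSV*" and "[heap]", and
 * is_android_lib() matches paths under /data/app-lib and /system/lib.
 */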
46 
47 static inline bool replace_android_lib(const char *filename, char *newfilename)
48 {
49 	const char *libname;
50 	char *app_abi;
51 	size_t app_abi_length, new_length;
52 	size_t lib_length = 0;
53 
54 	libname  = strrchr(filename, '/');
55 	if (libname)
56 		lib_length = strlen(libname);
57 
58 	app_abi = getenv("APP_ABI");
59 	if (!app_abi)
60 		return false;
61 
62 	app_abi_length = strlen(app_abi);
63 
64 	if (!strncmp(filename, "/data/app-lib", 13)) {
65 		char *apk_path;
66 
67 		if (!app_abi_length)
68 			return false;
69 
70 		new_length = 7 + app_abi_length + lib_length;
71 
72 		apk_path = getenv("APK_PATH");
73 		if (apk_path) {
74 			new_length += strlen(apk_path) + 1;
75 			if (new_length > PATH_MAX)
76 				return false;
77 			snprintf(newfilename, new_length,
78 				 "%s/libs/%s/%s", apk_path, app_abi, libname);
79 		} else {
80 			if (new_length > PATH_MAX)
81 				return false;
82 			snprintf(newfilename, new_length,
83 				 "libs/%s/%s", app_abi, libname);
84 		}
85 		return true;
86 	}
87 
88 	if (!strncmp(filename, "/system/lib", 11)) {
89 		char *ndk, *app;
90 		const char *arch;
91 		size_t ndk_length;
92 		size_t app_length;
93 
94 		ndk = getenv("NDK_ROOT");
95 		app = getenv("APP_PLATFORM");
96 
97 		if (!(ndk && app))
98 			return false;
99 
100 		ndk_length = strlen(ndk);
101 		app_length = strlen(app);
102 
103 		if (!(ndk_length && app_length && app_abi_length))
104 			return false;
105 
106 		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
107 		       !strncmp(app_abi, "mips", 4) ? "mips" :
108 		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
109 
110 		if (!arch)
111 			return false;
112 
113 		new_length = 27 + ndk_length +
114 			     app_length + lib_length
115 			   + strlen(arch);
116 
117 		if (new_length > PATH_MAX)
118 			return false;
119 		snprintf(newfilename, new_length,
120 			"%s/platforms/%s/arch-%s/usr/lib/%s",
121 			ndk, app, arch, libname);
122 
123 		return true;
124 	}
125 	return false;
126 }
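/*
 * Example rewrite performed above (hypothetical APK_PATH=/tmp/app and
 * APP_ABI=armeabi, values are illustrative only):
 *   "/data/app-lib/foo/libbar.so" -> "/tmp/app/libs/armeabi//libbar.so"
 * Note that libname keeps its leading '/', hence the doubled slash.
 */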
127 
128 void map__init(struct map *map, enum map_type type,
129 	       u64 start, u64 end, u64 pgoff, struct dso *dso)
130 {
131 	map->type     = type;
132 	map->start    = start;
133 	map->end      = end;
134 	map->pgoff    = pgoff;
135 	map->reloc    = 0;
136 	map->dso      = dso__get(dso);
137 	map->map_ip   = map__map_ip;
138 	map->unmap_ip = map__unmap_ip;
139 	RB_CLEAR_NODE(&map->rb_node);
140 	map->groups   = NULL;
141 	map->erange_warned = false;
142 	atomic_set(&map->refcnt, 1);
143 }
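/*
 * map__map_ip()/map__unmap_ip() (see map.h) translate between an address in
 * the process' memory image and the corresponding DSO-relative address,
 * roughly map_ip(addr) = addr - map->start + map->pgoff with unmap_ip() as
 * the inverse; identity__map_ip(), used below for anon maps, returns the
 * address unchanged.
 */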
144 
145 struct map *map__new(struct machine *machine, u64 start, u64 len,
146 		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
147 		     u64 ino_gen, u32 prot, u32 flags, char *filename,
148 		     enum map_type type, struct thread *thread)
149 {
150 	struct map *map = malloc(sizeof(*map));
151 
152 	if (map != NULL) {
153 		char newfilename[PATH_MAX];
154 		struct dso *dso;
155 		int anon, no_dso, vdso, android;
156 
157 		android = is_android_lib(filename);
158 		anon = is_anon_memory(filename);
159 		vdso = is_vdso_map(filename);
160 		no_dso = is_no_dso_memory(filename);
161 
162 		map->maj = d_maj;
163 		map->min = d_min;
164 		map->ino = ino;
165 		map->ino_generation = ino_gen;
166 		map->prot = prot;
167 		map->flags = flags;
168 
169 		if ((anon || no_dso) && type == MAP__FUNCTION) {
170 			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
171 			filename = newfilename;
172 		}
173 
174 		if (android) {
175 			if (replace_android_lib(filename, newfilename))
176 				filename = newfilename;
177 		}
178 
179 		if (vdso) {
180 			pgoff = 0;
181 			dso = machine__findnew_vdso(machine, thread);
182 		} else
183 			dso = machine__findnew_dso(machine, filename);
184 
185 		if (dso == NULL)
186 			goto out_delete;
187 
188 		map__init(map, type, start, start + len, pgoff, dso);
189 
190 		if (anon || no_dso) {
191 			map->map_ip = map->unmap_ip = identity__map_ip;
192 
193 			/*
194 			 * Set memory without DSO as loaded. All map__find_*
195 			 * functions still return NULL, and we avoid the
196 			 * unnecessary map__load warning.
197 			 */
198 			if (type != MAP__FUNCTION)
199 				dso__set_loaded(dso, map->type);
200 		}
201 		dso__put(dso);
202 	}
203 	return map;
204 out_delete:
205 	free(map);
206 	return NULL;
207 }
208 
209 /*
210  * Constructor variant for modules (where we know from /proc/modules where
211  * they are loaded) and for vmlinux, where only after we load all the
212  * symbols we'll know where it starts and ends.
213  */
214 struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
215 {
216 	struct map *map = calloc(1, (sizeof(*map) +
217 				     (dso->kernel ? sizeof(struct kmap) : 0)));
218 	if (map != NULL) {
219 		/*
220 		 * ->end will be filled after we load all the symbols
221 		 */
222 		map__init(map, type, start, 0, 0, dso);
223 	}
224 
225 	return map;
226 }
227 
228 /*
229  * Use this and __map__is_kmodule() for map instances that are in
230  * machine->kmaps, and thus have map->groups->machine all properly set, to
231  * disambiguate between the kernel and modules.
232  *
233  * When the need arises, introduce map__is_{kernel,kmodule}() that
234  * checks (map->groups != NULL && map->groups->machine != NULL &&
235  * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
236  */
237 bool __map__is_kernel(const struct map *map)
238 {
239 	return __machine__kernel_map(map->groups->machine, map->type) == map;
240 }
241 
242 static void map__exit(struct map *map)
243 {
244 	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
245 	dso__zput(map->dso);
246 }
247 
248 void map__delete(struct map *map)
249 {
250 	map__exit(map);
251 	free(map);
252 }
253 
254 void map__put(struct map *map)
255 {
256 	if (map && atomic_dec_and_test(&map->refcnt))
257 		map__delete(map);
258 }
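/*
 * Reference counting: map__new()/map__new2()/map__clone() return a map with
 * refcnt == 1, __maps__insert() takes an extra reference via map__get(), and
 * __maps__remove()/__maps__purge() drop it again, so a map is freed once the
 * last holder calls map__put().
 */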
259 
260 void map__fixup_start(struct map *map)
261 {
262 	struct rb_root *symbols = &map->dso->symbols[map->type];
263 	struct rb_node *nd = rb_first(symbols);
264 	if (nd != NULL) {
265 		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
266 		map->start = sym->start;
267 	}
268 }
269 
270 void map__fixup_end(struct map *map)
271 {
272 	struct rb_root *symbols = &map->dso->symbols[map->type];
273 	struct rb_node *nd = rb_last(symbols);
274 	if (nd != NULL) {
275 		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
276 		map->end = sym->end;
277 	}
278 }
279 
280 #define DSO__DELETED "(deleted)"
281 
282 int map__load(struct map *map, symbol_filter_t filter)
283 {
284 	const char *name = map->dso->long_name;
285 	int nr;
286 
287 	if (dso__loaded(map->dso, map->type))
288 		return 0;
289 
290 	nr = dso__load(map->dso, map, filter);
291 	if (nr < 0) {
292 		if (map->dso->has_build_id) {
293 			char sbuild_id[SBUILD_ID_SIZE];
294 
295 			build_id__sprintf(map->dso->build_id,
296 					  sizeof(map->dso->build_id),
297 					  sbuild_id);
298 			pr_warning("%s with build id %s not found",
299 				   name, sbuild_id);
300 		} else
301 			pr_warning("Failed to open %s", name);
302 
303 		pr_warning(", continuing without symbols\n");
304 		return -1;
305 	} else if (nr == 0) {
306 #ifdef HAVE_LIBELF_SUPPORT
307 		const size_t len = strlen(name);
308 		const size_t real_len = len - sizeof(DSO__DELETED);
309 
310 		if (len > sizeof(DSO__DELETED) &&
311 		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
312 			pr_warning("%.*s was updated (is prelink enabled?). "
313 				"Restart the long running apps that use it!\n",
314 				   (int)real_len, name);
315 		} else {
316 			pr_warning("no symbols found in %s, maybe install "
317 				   "a debug package?\n", name);
318 		}
319 #endif
320 		return -1;
321 	}
322 
323 	return 0;
324 }
325 
326 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
327 {
328 	return strcmp(namea, nameb);
329 }
330 
331 struct symbol *map__find_symbol(struct map *map, u64 addr,
332 				symbol_filter_t filter)
333 {
334 	if (map__load(map, filter) < 0)
335 		return NULL;
336 
337 	return dso__find_symbol(map->dso, map->type, addr);
338 }
339 
340 struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
341 					symbol_filter_t filter)
342 {
343 	if (map__load(map, filter) < 0)
344 		return NULL;
345 
346 	if (!dso__sorted_by_name(map->dso, map->type))
347 		dso__sort_by_name(map->dso, map->type);
348 
349 	return dso__find_symbol_by_name(map->dso, map->type, name);
350 }
351 
352 struct map *map__clone(struct map *from)
353 {
354 	struct map *map = memdup(from, sizeof(*map));
355 
356 	if (map != NULL) {
357 		atomic_set(&map->refcnt, 1);
358 		RB_CLEAR_NODE(&map->rb_node);
359 		dso__get(map->dso);
360 		map->groups = NULL;
361 	}
362 
363 	return map;
364 }
365 
366 int map__overlap(struct map *l, struct map *r)
367 {
368 	if (l->start > r->start) {
369 		struct map *t = l;
370 		l = r;
371 		r = t;
372 	}
373 
374 	if (l->end > r->start)
375 		return 1;
376 
377 	return 0;
378 }
379 
380 size_t map__fprintf(struct map *map, FILE *fp)
381 {
382 	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
383 		       map->start, map->end, map->pgoff, map->dso->name);
384 }
385 
386 size_t map__fprintf_dsoname(struct map *map, FILE *fp)
387 {
388 	const char *dsoname = "[unknown]";
389 
390 	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
391 		if (symbol_conf.show_kernel_path && map->dso->long_name)
392 			dsoname = map->dso->long_name;
393 		else if (map->dso->name)
394 			dsoname = map->dso->name;
395 	}
396 
397 	return fprintf(fp, "%s", dsoname);
398 }
399 
400 int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
401 			 FILE *fp)
402 {
403 	char *srcline;
404 	int ret = 0;
405 
406 	if (map && map->dso) {
407 		srcline = get_srcline(map->dso,
408 				      map__rip_2objdump(map, addr), NULL, true);
409 		if (srcline != SRCLINE_UNKNOWN)
410 			ret = fprintf(fp, "%s%s", prefix, srcline);
411 		free_srcline(srcline);
412 	}
413 	return ret;
414 }
415 
416 /**
417  * map__rip_2objdump - convert symbol start address to objdump address.
418  * @map: memory map
419  * @rip: symbol start address
420  *
421  * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
422  * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
423  * relative to section start.
424  *
425  * Return: Address suitable for passing to "objdump --start-address="
426  */
427 u64 map__rip_2objdump(struct map *map, u64 rip)
428 {
429 	if (!map->dso->adjust_symbols)
430 		return rip;
431 
432 	if (map->dso->rel)
433 		return rip - map->pgoff;
434 
435 	/*
436 	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
437 	 * but all kernel modules are ET_REL, so won't get here.
438 	 */
439 	if (map->dso->kernel == DSO_TYPE_USER)
440 		return rip + map->dso->text_offset;
441 
442 	return map->unmap_ip(map, rip) - map->reloc;
443 }
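/*
 * Worked example (illustrative numbers, assuming the default map__unmap_ip()
 * from map.h): with map->start = 0x400000, map->pgoff = 0 and map->reloc = 0,
 * a dso->rel DSO maps rip 0x1234 to 0x1234 - pgoff = 0x1234, while the
 * ET_EXEC-like case yields unmap_ip(map, 0x1234) - reloc = 0x401234.
 */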
444 
445 /**
446  * map__objdump_2mem - convert objdump address to a memory address.
447  * @map: memory map
448  * @ip: objdump address
449  *
450  * Closely related to map__rip_2objdump(), this function takes an address from
451  * objdump and converts it to a memory address.  Note this assumes that @map
452  * contains the address.  To be sure the result is valid, check it forwards
453  * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
454  *
455  * Return: Memory address.
456  */
457 u64 map__objdump_2mem(struct map *map, u64 ip)
458 {
459 	if (!map->dso->adjust_symbols)
460 		return map->unmap_ip(map, ip);
461 
462 	if (map->dso->rel)
463 		return map->unmap_ip(map, ip + map->pgoff);
464 
465 	/*
466 	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
467 	 * but all kernel modules are ET_REL, so won't get here.
468 	 */
469 	if (map->dso->kernel == DSO_TYPE_USER)
470 		return map->unmap_ip(map, ip - map->dso->text_offset);
471 
472 	return ip + map->reloc;
473 }
474 
475 static void maps__init(struct maps *maps)
476 {
477 	maps->entries = RB_ROOT;
478 	pthread_rwlock_init(&maps->lock, NULL);
479 }
480 
481 void map_groups__init(struct map_groups *mg, struct machine *machine)
482 {
483 	int i;
484 	for (i = 0; i < MAP__NR_TYPES; ++i) {
485 		maps__init(&mg->maps[i]);
486 	}
487 	mg->machine = machine;
488 	atomic_set(&mg->refcnt, 1);
489 }
490 
491 static void __maps__purge(struct maps *maps)
492 {
493 	struct rb_root *root = &maps->entries;
494 	struct rb_node *next = rb_first(root);
495 
496 	while (next) {
497 		struct map *pos = rb_entry(next, struct map, rb_node);
498 
499 		next = rb_next(&pos->rb_node);
500 		rb_erase_init(&pos->rb_node, root);
501 		map__put(pos);
502 	}
503 }
504 
505 static void maps__exit(struct maps *maps)
506 {
507 	pthread_rwlock_wrlock(&maps->lock);
508 	__maps__purge(maps);
509 	pthread_rwlock_unlock(&maps->lock);
510 }
511 
512 void map_groups__exit(struct map_groups *mg)
513 {
514 	int i;
515 
516 	for (i = 0; i < MAP__NR_TYPES; ++i)
517 		maps__exit(&mg->maps[i]);
518 }
519 
520 bool map_groups__empty(struct map_groups *mg)
521 {
522 	int i;
523 
524 	for (i = 0; i < MAP__NR_TYPES; ++i) {
525 		if (maps__first(&mg->maps[i]))
526 			return false;
527 	}
528 
529 	return true;
530 }
531 
532 struct map_groups *map_groups__new(struct machine *machine)
533 {
534 	struct map_groups *mg = malloc(sizeof(*mg));
535 
536 	if (mg != NULL)
537 		map_groups__init(mg, machine);
538 
539 	return mg;
540 }
541 
542 void map_groups__delete(struct map_groups *mg)
543 {
544 	map_groups__exit(mg);
545 	free(mg);
546 }
547 
548 void map_groups__put(struct map_groups *mg)
549 {
550 	if (mg && atomic_dec_and_test(&mg->refcnt))
551 		map_groups__delete(mg);
552 }
553 
554 struct symbol *map_groups__find_symbol(struct map_groups *mg,
555 				       enum map_type type, u64 addr,
556 				       struct map **mapp,
557 				       symbol_filter_t filter)
558 {
559 	struct map *map = map_groups__find(mg, type, addr);
560 
561 	/* Ensure map is loaded before using map->map_ip */
562 	if (map != NULL && map__load(map, filter) >= 0) {
563 		if (mapp != NULL)
564 			*mapp = map;
565 		return map__find_symbol(map, map->map_ip(map, addr), filter);
566 	}
567 
568 	return NULL;
569 }
570 
571 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
572 					 struct map **mapp, symbol_filter_t filter)
573 {
574 	struct symbol *sym;
575 	struct rb_node *nd;
576 
577 	pthread_rwlock_rdlock(&maps->lock);
578 
579 	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
580 		struct map *pos = rb_entry(nd, struct map, rb_node);
581 
582 		sym = map__find_symbol_by_name(pos, name, filter);
583 
584 		if (sym == NULL)
585 			continue;
586 		if (mapp != NULL)
587 			*mapp = pos;
588 		goto out;
589 	}
590 
591 	sym = NULL;
592 out:
593 	pthread_rwlock_unlock(&maps->lock);
594 	return sym;
595 }
596 
597 struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
598 					       enum map_type type,
599 					       const char *name,
600 					       struct map **mapp,
601 					       symbol_filter_t filter)
602 {
603 	struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);
604 
605 	return sym;
606 }
607 
608 int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
609 {
610 	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
611 		if (ams->map->groups == NULL)
612 			return -1;
613 		ams->map = map_groups__find(ams->map->groups, ams->map->type,
614 					    ams->addr);
615 		if (ams->map == NULL)
616 			return -1;
617 	}
618 
619 	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
620 	ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);
621 
622 	return ams->sym ? 0 : -1;
623 }
624 
625 static size_t maps__fprintf(struct maps *maps, FILE *fp)
626 {
627 	size_t printed = 0;
628 	struct rb_node *nd;
629 
630 	pthread_rwlock_rdlock(&maps->lock);
631 
632 	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
633 		struct map *pos = rb_entry(nd, struct map, rb_node);
634 		printed += fprintf(fp, "Map:");
635 		printed += map__fprintf(pos, fp);
636 		if (verbose > 2) {
637 			printed += dso__fprintf(pos->dso, pos->type, fp);
638 			printed += fprintf(fp, "--\n");
639 		}
640 	}
641 
642 	pthread_rwlock_unlock(&maps->lock);
643 
644 	return printed;
645 }
646 
647 size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
648 				  FILE *fp)
649 {
650 	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
651 	return printed + maps__fprintf(&mg->maps[type], fp);
652 }
653 
654 size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
655 {
656 	size_t printed = 0, i;
657 	for (i = 0; i < MAP__NR_TYPES; ++i)
658 		printed += __map_groups__fprintf_maps(mg, i, fp);
659 	return printed;
660 }
661 
662 static void __map_groups__insert(struct map_groups *mg, struct map *map)
663 {
664 	__maps__insert(&mg->maps[map->type], map);
665 	map->groups = mg;
666 }
667 
668 static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
669 {
670 	struct rb_root *root;
671 	struct rb_node *next;
672 	int err = 0;
673 
674 	pthread_rwlock_wrlock(&maps->lock);
675 
676 	root = &maps->entries;
677 	next = rb_first(root);
678 
679 	while (next) {
680 		struct map *pos = rb_entry(next, struct map, rb_node);
681 		next = rb_next(&pos->rb_node);
682 
683 		if (!map__overlap(pos, map))
684 			continue;
685 
686 		if (verbose >= 2) {
687 			fputs("overlapping maps:\n", fp);
688 			map__fprintf(map, fp);
689 			map__fprintf(pos, fp);
690 		}
691 
692 		rb_erase_init(&pos->rb_node, root);
693 		/*
694 		 * Now check if we need to create new maps for areas not
695 		 * overlapped by the new map:
696 		 */
697 		if (map->start > pos->start) {
698 			struct map *before = map__clone(pos);
699 
700 			if (before == NULL) {
701 				err = -ENOMEM;
702 				goto put_map;
703 			}
704 
705 			before->end = map->start;
706 			__map_groups__insert(pos->groups, before);
707 			if (verbose >= 2)
708 				map__fprintf(before, fp);
709 			map__put(before);
710 		}
711 
712 		if (map->end < pos->end) {
713 			struct map *after = map__clone(pos);
714 
715 			if (after == NULL) {
716 				err = -ENOMEM;
717 				goto put_map;
718 			}
719 
720 			after->start = map->end;
721 			__map_groups__insert(pos->groups, after);
722 			if (verbose >= 2)
723 				map__fprintf(after, fp);
724 			map__put(after);
725 		}
726 put_map:
727 		map__put(pos);
728 
729 		if (err)
730 			goto out;
731 	}
732 
733 	err = 0;
734 out:
735 	pthread_rwlock_unlock(&maps->lock);
736 	return err;
737 }
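/*
 * Illustrative split (addresses made up): if an existing map covers
 * [0x1000, 0x9000) and the new map covers [0x3000, 0x5000), the old map is
 * removed and replaced by a "before" clone ending at 0x3000 and an "after"
 * clone starting at 0x5000, so only the overlapped range is given up.
 */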
738 
739 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
740 				   FILE *fp)
741 {
742 	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
743 }
744 
745 /*
746  * XXX This should not really _copy_ the maps, but refcount them.
747  */
748 int map_groups__clone(struct thread *thread,
749 		      struct map_groups *parent, enum map_type type)
750 {
751 	struct map_groups *mg = thread->mg;
752 	int err = -ENOMEM;
753 	struct map *map;
754 	struct maps *maps = &parent->maps[type];
755 
756 	pthread_rwlock_rdlock(&maps->lock);
757 
758 	for (map = maps__first(maps); map; map = map__next(map)) {
759 		struct map *new = map__clone(map);
760 		if (new == NULL)
761 			goto out_unlock;
762 
763 		err = unwind__prepare_access(thread, new, NULL);
764 		if (err)
765 			goto out_unlock;
766 
767 		map_groups__insert(mg, new);
768 		map__put(new);
769 	}
770 
771 	err = 0;
772 out_unlock:
773 	pthread_rwlock_unlock(&maps->lock);
774 	return err;
775 }
776 
777 static void __maps__insert(struct maps *maps, struct map *map)
778 {
779 	struct rb_node **p = &maps->entries.rb_node;
780 	struct rb_node *parent = NULL;
781 	const u64 ip = map->start;
782 	struct map *m;
783 
784 	while (*p != NULL) {
785 		parent = *p;
786 		m = rb_entry(parent, struct map, rb_node);
787 		if (ip < m->start)
788 			p = &(*p)->rb_left;
789 		else
790 			p = &(*p)->rb_right;
791 	}
792 
793 	rb_link_node(&map->rb_node, parent, p);
794 	rb_insert_color(&map->rb_node, &maps->entries);
795 	map__get(map);
796 }
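/*
 * The entries tree is keyed only by map->start; maps__find() below also
 * checks map->end, so overlapping ranges are expected to have been resolved
 * beforehand (see maps__fixup_overlappings() above).
 */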
797 
798 void maps__insert(struct maps *maps, struct map *map)
799 {
800 	pthread_rwlock_wrlock(&maps->lock);
801 	__maps__insert(maps, map);
802 	pthread_rwlock_unlock(&maps->lock);
803 }
804 
805 static void __maps__remove(struct maps *maps, struct map *map)
806 {
807 	rb_erase_init(&map->rb_node, &maps->entries);
808 	map__put(map);
809 }
810 
811 void maps__remove(struct maps *maps, struct map *map)
812 {
813 	pthread_rwlock_wrlock(&maps->lock);
814 	__maps__remove(maps, map);
815 	pthread_rwlock_unlock(&maps->lock);
816 }
817 
818 struct map *maps__find(struct maps *maps, u64 ip)
819 {
820 	struct rb_node **p, *parent = NULL;
821 	struct map *m;
822 
823 	pthread_rwlock_rdlock(&maps->lock);
824 
825 	p = &maps->entries.rb_node;
826 	while (*p != NULL) {
827 		parent = *p;
828 		m = rb_entry(parent, struct map, rb_node);
829 		if (ip < m->start)
830 			p = &(*p)->rb_left;
831 		else if (ip >= m->end)
832 			p = &(*p)->rb_right;
833 		else
834 			goto out;
835 	}
836 
837 	m = NULL;
838 out:
839 	pthread_rwlock_unlock(&maps->lock);
840 	return m;
841 }
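/*
 * Illustrative lookup (addresses made up): with entries [0x400000, 0x600000)
 * and [0x7f0000000000, 0x7f0000200000), maps__find(maps, 0x401234) returns
 * the first map while maps__find(maps, 0x700000) returns NULL.
 */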
842 
843 struct map *maps__first(struct maps *maps)
844 {
845 	struct rb_node *first = rb_first(&maps->entries);
846 
847 	if (first)
848 		return rb_entry(first, struct map, rb_node);
849 	return NULL;
850 }
851 
852 struct map *map__next(struct map *map)
853 {
854 	struct rb_node *next = rb_next(&map->rb_node);
855 
856 	if (next)
857 		return rb_entry(next, struct map, rb_node);
858 	return NULL;
859 }
860 
861 struct kmap *map__kmap(struct map *map)
862 {
863 	if (!map->dso || !map->dso->kernel) {
864 		pr_err("Internal error: map__kmap with a non-kernel map\n");
865 		return NULL;
866 	}
867 	return (struct kmap *)(map + 1);
868 }
869 
870 struct map_groups *map__kmaps(struct map *map)
871 {
872 	struct kmap *kmap = map__kmap(map);
873 
874 	if (!kmap || !kmap->kmaps) {
875 		pr_err("Internal error: map__kmaps with a non-kernel map\n");
876 		return NULL;
877 	}
878 	return kmap->kmaps;
879 }
880