xref: /linux/tools/perf/util/maps.c (revision c745b15c1f9cea5680c2906ae868302108f8daf0)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <stdlib.h>
4 #include <linux/zalloc.h>
5 #include "debug.h"
6 #include "dso.h"
7 #include "map.h"
8 #include "maps.h"
9 #include "thread.h"
10 #include "ui/ui.h"
11 #include "unwind.h"
12 
/* rbtree node wrapping a single 'struct map'; the tree is keyed by map start address. */
struct map_rb_node {
	struct rb_node rb_node;
	struct map *map;	/* owned reference, taken on insert, dropped on remove */
};
17 
/* Walk all map_rb_node entries in address order; caller must hold the maps lock. */
#define maps__for_each_entry(maps, map) \
	for (map = maps__first(maps); map; map = map_rb_node__next(map))
20 
/*
 * Deletion-safe walk: 'next' is fetched before the loop body runs, so the
 * current node may be freed. map_rb_node__next() tolerates a NULL argument,
 * which makes the initializer safe on an empty tree.
 */
#define maps__for_each_entry_safe(maps, map, next) \
	for (map = maps__first(maps), next = map_rb_node__next(map); map; \
	     map = next, next = map_rb_node__next(map))
24 
/* Accessor: the rbtree of maps behind the refcount-checking wrapper. */
static struct rb_root *maps__entries(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->entries;
}
29 
/* Accessor: the rw_semaphore protecting the tree and the by-name array. */
static struct rw_semaphore *maps__lock(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->lock;
}
34 
/* Accessor: lazily-built array of maps sorted by dso short name (may be NULL). */
static struct map **maps__maps_by_name(struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name;
}
39 
40 static struct map_rb_node *maps__first(struct maps *maps)
41 {
42 	struct rb_node *first = rb_first(maps__entries(maps));
43 
44 	if (first)
45 		return rb_entry(first, struct map_rb_node, rb_node);
46 	return NULL;
47 }
48 
49 static struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
50 {
51 	struct rb_node *next;
52 
53 	if (!node)
54 		return NULL;
55 
56 	next = rb_next(&node->rb_node);
57 
58 	if (!next)
59 		return NULL;
60 
61 	return rb_entry(next, struct map_rb_node, rb_node);
62 }
63 
/*
 * Linear scan for the rbtree node wrapping @map. The comparison unwraps the
 * refcount-checked handles, so two distinct handles to the same underlying
 * map still match. Caller must hold the maps lock. Returns NULL if absent.
 */
static struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	maps__for_each_entry(maps, rb_node) {
		if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
			return rb_node;
	}
	return NULL;
}
74 
/* Initialize a freshly allocated maps collection: refcount 1, empty tree,
 * no by-name cache. */
static void maps__init(struct maps *maps, struct machine *machine)
{
	refcount_set(maps__refcnt(maps), 1);
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->entries = RB_ROOT;
	RC_CHK_ACCESS(maps)->machine = machine;
	RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
}
85 
/*
 * Drop every reference held by the by-name lookup array and free the array
 * itself. nr_maps_allocated is reset so the array is rebuilt from the rbtree
 * on the next by-name search. Caller must hold the maps lock for writing.
 */
static void __maps__free_maps_by_name(struct maps *maps)
{
	/*
	 * Free everything to try to do it from the rbtree in the next search
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
}
97 
/*
 * Link @map into the rbtree, keyed by its start address, taking a new
 * reference on it. Duplicate start addresses are allowed (they go right).
 * Caller must hold the maps lock for writing. Returns 0 or -ENOMEM.
 */
static int __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps__entries(maps)->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map__start(map);
	struct map_rb_node *m, *new_rb_node;

	new_rb_node = malloc(sizeof(*new_rb_node));
	if (!new_rb_node)
		return -ENOMEM;

	RB_CLEAR_NODE(&new_rb_node->rb_node);
	new_rb_node->map = map__get(map);

	/* Standard rbtree descent to find the insertion slot. */
	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&new_rb_node->rb_node, parent, p);
	rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
	return 0;
}
125 
/*
 * Insert @map into @maps, taking a new reference on it.
 *
 * Also wires up kernel maps (kmap->kmaps back-pointer) and, when a by-name
 * lookup array already exists, grows it as needed, appends the map and
 * re-sorts so subsequent name searches stay valid.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int maps__insert(struct maps *maps, struct map *map)
{
	int err;
	const struct dso *dso = map__dso(map);

	down_write(maps__lock(maps));
	err = __maps__insert(maps, map);
	if (err)
		goto out;

	++RC_CHK_ACCESS(maps)->nr_maps;

	if (dso && dso->kernel) {
		struct kmap *kmap = map__kmap(map);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}


	/*
	 * If we already performed some search by name, then we need to add the just
	 * inserted map and resort.
	 */
	if (maps__maps_by_name(maps)) {
		if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) {
			/* Grow geometrically to amortize repeated inserts. */
			int nr_allocate = maps__nr_maps(maps) * 2;
			/* sizeof(map) == sizeof(struct map *): array of pointers. */
			struct map **maps_by_name = realloc(maps__maps_by_name(maps),
							    nr_allocate * sizeof(map));

			if (maps_by_name == NULL) {
				/*
				 * NOTE(review): on this failure the map stays
				 * in the rbtree and nr_maps stays incremented
				 * even though -ENOMEM is returned; only the
				 * by-name cache is torn down (rebuilt on the
				 * next name lookup). Confirm callers tolerate
				 * that partial success.
				 */
				__maps__free_maps_by_name(maps);
				err = -ENOMEM;
				goto out;
			}

			RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
			RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
		}
		maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map);
		__maps__sort_by_name(maps);
	}
 out:
	up_write(maps__lock(maps));
	return err;
}
174 
175 static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
176 {
177 	rb_erase_init(&rb_node->rb_node, maps__entries(maps));
178 	map__put(rb_node->map);
179 	free(rb_node);
180 }
181 
/*
 * Remove @map from @maps and drop the reference the tree held on it.
 * The last_search_by_name cache is cleared if it points at @map, and the
 * whole by-name array is discarded so it is rebuilt on the next name lookup.
 */
void maps__remove(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	down_write(maps__lock(maps));
	if (RC_CHK_ACCESS(maps)->last_search_by_name == map)
		RC_CHK_ACCESS(maps)->last_search_by_name = NULL;

	/*
	 * NOTE(review): assumes @map is present in the tree; maps__find_node()
	 * returns NULL otherwise and the assert below would dereference it
	 * (or be compiled out under NDEBUG) -- confirm callers never
	 * double-remove.
	 */
	rb_node = maps__find_node(maps, map);
	assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map));
	__maps__remove(maps, rb_node);
	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);
	--RC_CHK_ACCESS(maps)->nr_maps;
	up_write(maps__lock(maps));
}
198 
199 static void __maps__purge(struct maps *maps)
200 {
201 	struct map_rb_node *pos, *next;
202 
203 	if (maps__maps_by_name(maps))
204 		__maps__free_maps_by_name(maps);
205 
206 	maps__for_each_entry_safe(maps, pos, next) {
207 		rb_erase_init(&pos->rb_node,  maps__entries(maps));
208 		map__put(pos->map);
209 		free(pos);
210 	}
211 }
212 
/* Purge all entries under the write lock. */
static void maps__exit(struct maps *maps)
{
	struct rw_semaphore *lock = maps__lock(maps);

	down_write(lock);
	__maps__purge(maps);
	up_write(lock);
}
219 
220 bool maps__empty(struct maps *maps)
221 {
222 	return !maps__first(maps);
223 }
224 
/*
 * Allocate and initialize a new maps collection with a refcount of one.
 * Returns NULL on allocation failure.
 */
struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	/* ADD_RC_CHK wraps the raw struct in a refcount-checking handle. */
	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}
235 
/* Tear down @maps: purge all entries, release unwind state, free storage.
 * Called only from maps__put() when the last reference is dropped. */
static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	unwind__finish_access(maps);
	RC_CHK_FREE(maps);
}
242 
/*
 * Take a new reference on @maps and return a refcount-checked handle for it.
 * RC_CHK_GET handles NULL input, in which case no reference is taken.
 */
struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}
252 
/*
 * Drop a reference on @maps, freeing it when the count reaches zero.
 * When not freeing, RC_CHK_PUT releases only the checking handle
 * (a no-op for a NULL @maps).
 */
void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}
260 
261 int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
262 {
263 	struct map_rb_node *pos;
264 	int ret = 0;
265 
266 	down_read(maps__lock(maps));
267 	maps__for_each_entry(maps, pos)	{
268 		ret = cb(pos->map, data);
269 		if (ret)
270 			break;
271 	}
272 	up_read(maps__lock(maps));
273 	return ret;
274 }
275 
/*
 * Remove every map for which @cb returns true. If anything was removed, the
 * by-name lookup array is discarded so it is rebuilt on the next name search.
 */
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map_rb_node *pos, *next;
	unsigned int start_nr_maps;

	down_write(maps__lock(maps));

	start_nr_maps = maps__nr_maps(maps);
	maps__for_each_entry_safe(maps, pos, next)	{
		if (cb(pos->map, data)) {
			__maps__remove(maps, pos);
			--RC_CHK_ACCESS(maps)->nr_maps;
		}
	}
	/* Count changed => at least one removal => cache is stale. */
	if (maps__maps_by_name(maps) && start_nr_maps != maps__nr_maps(maps))
		__maps__free_maps_by_name(maps);

	up_write(maps__lock(maps));
}
295 
296 struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
297 {
298 	struct map *map = maps__find(maps, addr);
299 
300 	/* Ensure map is loaded before using map->map_ip */
301 	if (map != NULL && map__load(map) >= 0) {
302 		if (mapp != NULL)
303 			*mapp = map;
304 		return map__find_symbol(map, map__map_ip(map, addr));
305 	}
306 
307 	return NULL;
308 }
309 
/* Callback context for maps__find_symbol_by_name(). */
struct maps__find_symbol_by_name_args {
	struct map **mapp;	/* out (optional): map containing the symbol, map__get'd */
	const char *name;	/* symbol name being searched for */
	struct symbol *sym;	/* out: result, NULL until found */
};
315 
316 static int maps__find_symbol_by_name_cb(struct map *map, void *data)
317 {
318 	struct maps__find_symbol_by_name_args *args = data;
319 
320 	args->sym = map__find_symbol_by_name(map, args->name);
321 	if (!args->sym)
322 		return 0;
323 
324 	if (!map__contains_symbol(map, args->sym)) {
325 		args->sym = NULL;
326 		return 0;
327 	}
328 
329 	if (args->mapp != NULL)
330 		*args->mapp = map__get(map);
331 	return 1;
332 }
333 
334 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
335 {
336 	struct maps__find_symbol_by_name_args args = {
337 		.mapp = mapp,
338 		.name = name,
339 		.sym = NULL,
340 	};
341 
342 	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
343 	return args.sym;
344 }
345 
/*
 * Resolve @ams->addr to a map and symbol. If the address falls outside the
 * current ams->ms.map, re-find the map in @maps. On success, al_addr holds
 * the map-relative address and ms.sym the symbol. Returns 0 on success,
 * -1 when no map or no symbol is found.
 *
 * NOTE(review): ams->ms.map is dereferenced before the NULL check on @maps,
 * so callers are assumed to always pass a non-NULL ms.map -- confirm.
 */
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}
361 
/* Callback context for maps__fprintf(): output stream and running byte count. */
struct maps__fprintf_args {
	FILE *fp;
	size_t printed;
};
366 
367 static int maps__fprintf_cb(struct map *map, void *data)
368 {
369 	struct maps__fprintf_args *args = data;
370 
371 	args->printed += fprintf(args->fp, "Map:");
372 	args->printed += map__fprintf(map, args->fp);
373 	if (verbose > 2) {
374 		args->printed += dso__fprintf(map__dso(map), args->fp);
375 		args->printed += fprintf(args->fp, "--\n");
376 	}
377 	return 0;
378 }
379 
380 size_t maps__fprintf(struct maps *maps, FILE *fp)
381 {
382 	struct maps__fprintf_args args = {
383 		.fp = fp,
384 		.printed = 0,
385 	};
386 
387 	maps__for_each_map(maps, maps__fprintf_cb, &args);
388 
389 	return args.printed;
390 }
391 
/*
 * Find first map where end > map->start.
 * Same as find_vma() in kernel.
 */
static struct rb_node *first_ending_after(struct maps *maps, const struct map *map)
{
	struct rb_root *root;
	struct rb_node *next, *first;

	root = maps__entries(maps);
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);

		if (map__end(pos->map) > map__start(map)) {
			/* Candidate; keep descending left for an earlier one. */
			first = next;
			if (map__start(pos->map) <= map__start(map))
				break;	/* pos covers map's start: cannot do better. */
			next = next->rb_left;
		} else
			next = next->rb_right;
	}
	return first;
}
417 
/*
 * Adds new to maps, if new overlaps existing entries then the existing maps are
 * adjusted or removed so that new fits without overlapping any entries.
 */
int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{

	struct rb_node *next;
	int err = 0;
	FILE *fp = debug_file();

	down_write(maps__lock(maps));

	next = first_ending_after(maps, new);
	while (next && !err) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos->map) >= map__end(new))
			break;

		if (verbose >= 2) {

			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					 map__dso(new)->name);
			} else {
				pr_debug("overlapping maps:\n");
				map__fprintf(new, fp);
				map__fprintf(pos->map, fp);
			}
		}

		/* pos overlaps new: take it out of the tree, then re-insert
		 * any non-overlapped leading/trailing pieces as clones. */
		rb_erase_init(&pos->rb_node, maps__entries(maps));
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos->map)) {
			/* The piece of pos before new's start survives. */
			struct map *before = map__clone(pos->map);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			map__set_end(before, map__start(new));
			err = __maps__insert(maps, before);
			if (err) {
				map__put(before);
				goto put_map;
			}

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map__end(new) < map__end(pos->map)) {
			/* The piece of pos after new's end survives; shift its
			 * pgoff so file offsets still match addresses. */
			struct map *after = map__clone(pos->map);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos->map));
			assert(map__map_ip(pos->map, map__end(new)) ==
				map__map_ip(after, map__end(new)));
			err = __maps__insert(maps, after);
			if (err) {
				map__put(after);
				goto put_map;
			}
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos->map);
		free(pos);
	}
	/* Add the map. */
	/* NOTE(review): a -ENOMEM from the loop above is overwritten by this
	 * insert's result, so a partial fixup can still report success --
	 * confirm this is the intended best-effort semantic. */
	err = __maps__insert(maps, new);
	up_write(maps__lock(maps));
	return err;
}
510 
511 int maps__copy_from(struct maps *maps, struct maps *parent)
512 {
513 	int err;
514 	struct map_rb_node *rb_node;
515 
516 	down_read(maps__lock(parent));
517 
518 	maps__for_each_entry(parent, rb_node) {
519 		struct map *new = map__clone(rb_node->map);
520 
521 		if (new == NULL) {
522 			err = -ENOMEM;
523 			goto out_unlock;
524 		}
525 
526 		err = unwind__prepare_access(maps, new, NULL);
527 		if (err)
528 			goto out_unlock;
529 
530 		err = maps__insert(maps, new);
531 		if (err)
532 			goto out_unlock;
533 
534 		map__put(new);
535 	}
536 
537 	err = 0;
538 out_unlock:
539 	up_read(maps__lock(parent));
540 	return err;
541 }
542 
543 struct map *maps__find(struct maps *maps, u64 ip)
544 {
545 	struct rb_node *p;
546 	struct map_rb_node *m;
547 
548 
549 	down_read(maps__lock(maps));
550 
551 	p = maps__entries(maps)->rb_node;
552 	while (p != NULL) {
553 		m = rb_entry(p, struct map_rb_node, rb_node);
554 		if (ip < map__start(m->map))
555 			p = p->rb_left;
556 		else if (ip >= map__end(m->map))
557 			p = p->rb_right;
558 		else
559 			goto out;
560 	}
561 
562 	m = NULL;
563 out:
564 	up_read(maps__lock(maps));
565 	return m ? m->map : NULL;
566 }
567 
568 static int map__strcmp(const void *a, const void *b)
569 {
570 	const struct map *map_a = *(const struct map **)a;
571 	const struct map *map_b = *(const struct map **)b;
572 	const struct dso *dso_a = map__dso(map_a);
573 	const struct dso *dso_b = map__dso(map_b);
574 	int ret = strcmp(dso_a->short_name, dso_b->short_name);
575 
576 	if (ret == 0 && map_a != map_b) {
577 		/*
578 		 * Ensure distinct but name equal maps have an order in part to
579 		 * aid reference counting.
580 		 */
581 		ret = (int)map__start(map_a) - (int)map__start(map_b);
582 		if (ret == 0)
583 			ret = (int)((intptr_t)map_a - (intptr_t)map_b);
584 	}
585 
586 	return ret;
587 }
588 
589 static int map__strcmp_name(const void *name, const void *b)
590 {
591 	const struct dso *dso = map__dso(*(const struct map **)b);
592 
593 	return strcmp(name, dso->short_name);
594 }
595 
596 void __maps__sort_by_name(struct maps *maps)
597 {
598 	qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
599 }
600 
/*
 * Build the sorted-by-name lookup array from the rbtree contents.
 * Entered and exited with the read lock held; temporarily upgrades to the
 * write lock while publishing the array. Returns 0 on success, -1 on
 * allocation failure.
 */
static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
{
	struct map_rb_node *rb_node;
	struct map **maps_by_name = realloc(maps__maps_by_name(maps),
					    maps__nr_maps(maps) * sizeof(struct map *));
	int i = 0;

	if (maps_by_name == NULL)
		return -1;

	/*
	 * NOTE(review): dropping the read lock before taking the write lock
	 * opens a window in which another thread could mutate the tree, so
	 * nr_maps may no longer match the allocation above -- confirm callers
	 * serialize by-name lookups.
	 */
	up_read(maps__lock(maps));
	down_write(maps__lock(maps));

	RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);

	/* Each array slot holds its own reference to the map. */
	maps__for_each_entry(maps, rb_node)
		maps_by_name[i++] = map__get(rb_node->map);

	__maps__sort_by_name(maps);

	up_write(maps__lock(maps));
	down_read(maps__lock(maps));

	return 0;
}
627 
628 static struct map *__maps__find_by_name(struct maps *maps, const char *name)
629 {
630 	struct map **mapp;
631 
632 	if (maps__maps_by_name(maps) == NULL &&
633 	    map__groups__sort_by_name_from_rbtree(maps))
634 		return NULL;
635 
636 	mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
637 		       sizeof(*mapp), map__strcmp_name);
638 	if (mapp)
639 		return *mapp;
640 	return NULL;
641 }
642 
/*
 * Find the map whose dso short name equals @name. Checks a one-entry cache
 * first, then the sorted by-name array, and finally falls back to walking
 * the rbtree (populating the cache on a hit). Returns NULL when not found.
 * No reference is taken on the returned map.
 */
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map_rb_node *rb_node;
	struct map *map;

	down_read(maps__lock(maps));


	/* Fast path: repeat of the previous successful lookup. */
	if (RC_CHK_ACCESS(maps)->last_search_by_name) {
		const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);

		if (strcmp(dso->short_name, name) == 0) {
			map = RC_CHK_ACCESS(maps)->last_search_by_name;
			goto out_unlock;
		}
	}
	/*
	 * If we have maps->maps_by_name, then the name isn't in the rbtree,
	 * as maps->maps_by_name mirrors the rbtree when lookups by name are
	 * made.
	 */
	map = __maps__find_by_name(maps, name);
	if (map || maps__maps_by_name(maps) != NULL)
		goto out_unlock;

	/* Fallback to traversing the rbtree... */
	maps__for_each_entry(maps, rb_node) {
		struct dso *dso;

		map = rb_node->map;
		dso = map__dso(map);
		if (strcmp(dso->short_name, name) == 0) {
			RC_CHK_ACCESS(maps)->last_search_by_name = map;
			goto out_unlock;
		}
	}
	map = NULL;

out_unlock:
	up_read(maps__lock(maps));
	return map;
}
685 
686 struct map *maps__find_next_entry(struct maps *maps, struct map *map)
687 {
688 	struct map_rb_node *rb_node = maps__find_node(maps, map);
689 	struct map_rb_node *next = map_rb_node__next(rb_node);
690 
691 	if (next)
692 		return next->map;
693 
694 	return NULL;
695 }
696 
/*
 * Give every map a sane end address: each map with a zero or overlapping end
 * is clipped to the start of its successor, and the last map (if its end is
 * still unset) is extended to the top of the address space.
 */
void maps__fixup_end(struct maps *maps)
{
	struct map_rb_node *prev = NULL, *curr;

	down_write(maps__lock(maps));

	maps__for_each_entry(maps, curr) {
		if (prev && (!map__end(prev->map) || map__end(prev->map) > map__start(curr->map)))
			map__set_end(prev->map, map__start(curr->map));

		prev = curr;
	}

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	/* curr is NULL here only when the tree was empty. */
	if (curr && !map__end(curr->map))
		map__set_end(curr->map, ~0ULL);

	up_write(maps__lock(maps));
}
719 
/*
 * Merges map into maps by splitting the new map within the existing map
 * regions.
 *
 * Pieces of @new_map that land on top of existing kernel maps are dropped;
 * the surviving fragments (collected on 'merged' plus the trimmed @new_map
 * itself) are inserted. The reference to @new_map is always consumed.
 * Returns 0 on success or a negative error code.
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	struct map_rb_node *rb_node;
	struct rb_node *first;
	bool overlaps;
	LIST_HEAD(merged);
	int err = 0;

	/* Cheap overlap pre-check under the read lock. */
	down_read(maps__lock(kmaps));
	first = first_ending_after(kmaps, new_map);
	rb_node = first ? rb_entry(first, struct map_rb_node, rb_node) : NULL;
	overlaps = rb_node && map__start(rb_node->map) < map__end(new_map);
	up_read(maps__lock(kmaps));

	if (!overlaps)
		return maps__insert(kmaps, new_map);

	/*
	 * NOTE(review): this walk runs without holding the kmaps lock --
	 * presumably callers guarantee no concurrent mutation here; confirm.
	 */
	maps__for_each_entry(kmaps, rb_node) {
		struct map *old_map = rb_node->map;

		/* no overload with this one */
		if (map__end(new_map) < map__start(old_map) ||
		    map__start(new_map) >= map__end(old_map))
			continue;

		if (map__start(new_map) < map__start(old_map)) {
			/*
			 * |new......
			 *       |old....
			 */
			if (map__end(new_map) < map__end(old_map)) {
				/*
				 * |new......|     -> |new..|
				 *       |old....| ->       |old....|
				 */
				map__set_end(new_map, map__start(old_map));
			} else {
				/*
				 * |new.............| -> |new..|       |new..|
				 *       |old....|    ->       |old....|
				 */
				struct map_list_node *m = map_list_node__new();

				if (!m) {
					err = -ENOMEM;
					goto out;
				}

				/* Save the leading fragment; keep trimming
				 * new_map itself for the trailing part. */
				m->map = map__clone(new_map);
				if (!m->map) {
					free(m);
					err = -ENOMEM;
					goto out;
				}

				map__set_end(m->map, map__start(old_map));
				list_add_tail(&m->node, &merged);
				map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
				map__set_start(new_map, map__end(old_map));
			}
		} else {
			/*
			 *      |new......
			 * |old....
			 */
			if (map__end(new_map) < map__end(old_map)) {
				/*
				 *      |new..|   -> x
				 * |old.........| -> |old.........|
				 */
				map__put(new_map);
				new_map = NULL;
				break;
			} else {
				/*
				 *      |new......| ->         |new...|
				 * |old....|        -> |old....|
				 */
				map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
				map__set_start(new_map, map__end(old_map));
			}
		}
	}

out:
	/* Insert saved fragments (skipped once an error occurred) and always
	 * release their references. */
	while (!list_empty(&merged)) {
		struct map_list_node *old_node;

		old_node = list_entry(merged.next, struct map_list_node, node);
		list_del_init(&old_node->node);
		if (!err)
			err = maps__insert(kmaps, old_node->map);
		map__put(old_node->map);
		free(old_node);
	}

	if (new_map) {
		if (!err)
			err = maps__insert(kmaps, new_map);
		map__put(new_map);
	}
	return err;
}
827 
828 void maps__load_first(struct maps *maps)
829 {
830 	struct map_rb_node *first;
831 
832 	down_read(maps__lock(maps));
833 
834 	first = maps__first(maps);
835 	if (first)
836 		map__load(first->map);
837 
838 	up_read(maps__lock(maps));
839 }
840