// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"

static void maps__init(struct maps *maps, struct machine *machine)
{
	maps->entries = RB_ROOT;
	init_rwsem(maps__lock(maps));
	maps->machine = machine;
	maps->last_search_by_name = NULL;
	maps->nr_maps = 0;
	maps->maps_by_name = NULL;
	refcount_set(&maps->refcnt, 1);
}

static void __maps__free_maps_by_name(struct maps *maps)
{
	/*
	 * Free everything so that the next search by name falls back to the rbtree.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&maps->maps_by_name);
	maps->nr_maps_allocated = 0;
}

static int __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps__entries(maps)->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map__start(map);
	struct map_rb_node *m, *new_rb_node;

	new_rb_node = malloc(sizeof(*new_rb_node));
	if (!new_rb_node)
		return -ENOMEM;

	RB_CLEAR_NODE(&new_rb_node->rb_node);
	new_rb_node->map = map__get(map);

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&new_rb_node->rb_node, parent, p);
	rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
	return 0;
}

int maps__insert(struct maps *maps, struct map *map)
{
	int err;
	const struct dso *dso = map__dso(map);

	down_write(maps__lock(maps));
	err = __maps__insert(maps, map);
	if (err)
		goto out;

	++maps->nr_maps;

	if (dso && dso->kernel) {
		struct kmap *kmap = map__kmap(map);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}

	/*
	 * If we already performed some search by name, then we need to add the just
	 * inserted map and resort.
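	 * The maps_by_name array is grown (doubled) when it runs out of slots; if
	 * that allocation fails the array is freed and the next search by name
	 * falls back to walking the rbtree.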
	 */
	if (maps__maps_by_name(maps)) {
		if (maps__nr_maps(maps) > maps->nr_maps_allocated) {
			int nr_allocate = maps__nr_maps(maps) * 2;
			struct map **maps_by_name = realloc(maps__maps_by_name(maps),
							    nr_allocate * sizeof(map));

			if (maps_by_name == NULL) {
				__maps__free_maps_by_name(maps);
				err = -ENOMEM;
				goto out;
			}

			maps->maps_by_name = maps_by_name;
			maps->nr_maps_allocated = nr_allocate;
		}
		maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map);
		__maps__sort_by_name(maps);
	}
out:
	up_write(maps__lock(maps));
	return err;
}

static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
{
	rb_erase_init(&rb_node->rb_node, maps__entries(maps));
	map__put(rb_node->map);
	free(rb_node);
}

void maps__remove(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	down_write(maps__lock(maps));
	if (maps->last_search_by_name == map)
		maps->last_search_by_name = NULL;

	rb_node = maps__find_node(maps, map);
	assert(rb_node->map == map);
	__maps__remove(maps, rb_node);
	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);
	--maps->nr_maps;
	up_write(maps__lock(maps));
}

static void __maps__purge(struct maps *maps)
{
	struct map_rb_node *pos, *next;

	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);

	maps__for_each_entry_safe(maps, pos, next) {
		rb_erase_init(&pos->rb_node, maps__entries(maps));
		map__put(pos->map);
		free(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__purge(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	return !maps__first(maps);
}

struct maps *maps__new(struct machine *machine)
{
	struct maps *maps = zalloc(sizeof(*maps));

	if (maps != NULL)
		maps__init(maps, machine);

	return maps;
}

void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	unwind__finish_access(maps);
	free(maps);
}

struct maps *maps__get(struct maps *maps)
{
	if (maps)
		refcount_inc(&maps->refcnt);

	return maps;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(&maps->refcnt))
		maps__delete(maps);
}

struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map__map_ip(map, addr));
	}

	return NULL;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct symbol *sym;
	struct map_rb_node *pos;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, pos) {
		sym = map__find_symbol_by_name(pos->map, name);

		if (sym == NULL)
			continue;
		if (!map__contains_symbol(pos->map, sym)) {
			sym = NULL;
			continue;
		}
		if (mapp != NULL)
			*mapp = pos->map;
		goto out;
	}

	sym = NULL;
out:
	up_read(maps__lock(maps));
	return sym;
}

int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct map_rb_node *pos;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, pos) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos->map, fp);
		if (verbose > 2) {
			printed += dso__fprintf(map__dso(pos->map), fp);
			printed += fprintf(fp, "--\n");
		}
	}

	up_read(maps__lock(maps));

	return printed;
}

int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next, *first;
	int err = 0;

	down_write(maps__lock(maps));

	root = maps__entries(maps);

	/*
	 * Find first map where end > map->start.
	 * Same as find_vma() in kernel.
	 */
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);

		if (map__end(pos->map) > map__start(map)) {
			first = next;
			if (map__start(pos->map) <= map__start(map))
				break;
			next = next->rb_left;
		} else
			next = next->rb_right;
	}

	next = first;
	while (next && !err) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos->map) >= map__end(map))
			break;

		if (verbose >= 2) {
			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					 map__dso(map)->name);
			} else {
				fputs("overlapping maps:\n", fp);
				map__fprintf(map, fp);
				map__fprintf(pos->map, fp);
			}
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(map) > map__start(pos->map)) {
			struct map *before = map__clone(pos->map);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map__start(map);
			err = __maps__insert(maps, before);
			if (err) {
				map__put(before);
				goto put_map;
			}

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map__end(map) < map__end(pos->map)) {
			struct map *after = map__clone(pos->map);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map__end(map);
			after->pgoff += map__end(map) - map__start(pos->map);
			assert(map__map_ip(pos->map, map__end(map)) ==
			       map__map_ip(after, map__end(map)));
			err = __maps__insert(maps, after);
			if (err) {
				map__put(after);
				goto put_map;
			}
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos->map);
	}
	up_write(maps__lock(maps));
	return err;
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
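 * For now each map in 'parent' is cloned, prepared for unwinding and inserted
 * into the child's maps; map__put() then drops the clone's extra reference,
 * leaving the child's tree as the owner.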
 */
int maps__clone(struct thread *thread, struct maps *parent)
{
	struct maps *maps = thread->maps;
	int err;
	struct map_rb_node *rb_node;

	down_read(maps__lock(parent));

	maps__for_each_entry(parent, rb_node) {
		struct map *new = map__clone(rb_node->map);

		if (new == NULL) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = unwind__prepare_access(maps, new, NULL);
		if (err)
			goto out_unlock;

		err = maps__insert(maps, new);
		if (err)
			goto out_unlock;

		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(maps__lock(parent));
	return err;
}

struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	maps__for_each_entry(maps, rb_node) {
		if (rb_node->map == map)
			return rb_node;
	}
	return NULL;
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node *p;
	struct map_rb_node *m;

	down_read(maps__lock(maps));

	p = maps__entries(maps)->rb_node;
	while (p != NULL) {
		m = rb_entry(p, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = p->rb_left;
		else if (ip >= map__end(m->map))
			p = p->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	up_read(maps__lock(maps));
	return m ? m->map : NULL;
}

struct map_rb_node *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(maps__entries(maps));

	if (first)
		return rb_entry(first, struct map_rb_node, rb_node);
	return NULL;
}

struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
{
	struct rb_node *next;

	if (!node)
		return NULL;

	next = rb_next(&node->rb_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct map_rb_node, rb_node);
}
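
/*
 * Minimal usage sketch (illustration only, not built): how the lifetime and
 * lookup API above fits together. The 'machine' and 'map' arguments are
 * assumed to be obtained elsewhere (e.g. from machine__new_host()/map__new());
 * error handling is abbreviated.
 */
#if 0
static void maps__usage_sketch(struct machine *machine, struct map *map)
{
	struct maps *maps = maps__new(machine);		/* refcount starts at 1 */

	if (maps == NULL)
		return;

	if (maps__insert(maps, map) == 0) {
		/* maps__find() returns a pointer owned by 'maps', no extra get */
		struct map *found = maps__find(maps, map__start(map));

		assert(found == map);
		maps__remove(maps, map);
	}

	maps__put(maps);	/* drops the last reference -> maps__delete() */
}
#endif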