1 /*- 2 * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra. 3 * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>. 4 * Copyright 2009-2012 Konstantin Belousov <kib@FreeBSD.ORG>. 5 * Copyright 2012 John Marino <draco@marino.st>. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * $FreeBSD$ 29 */ 30 31 /* 32 * Dynamic linker for ELF. 33 * 34 * John Polstra <jdp@polstra.com>. 35 */ 36 37 #ifndef __GNUC__ 38 #error "GCC is needed to compile this file" 39 #endif 40 41 #include <sys/param.h> 42 #include <sys/mount.h> 43 #include <sys/mman.h> 44 #include <sys/stat.h> 45 #include <sys/sysctl.h> 46 #include <sys/uio.h> 47 #include <sys/utsname.h> 48 #include <sys/ktrace.h> 49 50 #include <dlfcn.h> 51 #include <err.h> 52 #include <errno.h> 53 #include <fcntl.h> 54 #include <stdarg.h> 55 #include <stdio.h> 56 #include <stdlib.h> 57 #include <string.h> 58 #include <unistd.h> 59 60 #include "debug.h" 61 #include "rtld.h" 62 #include "libmap.h" 63 #include "rtld_tls.h" 64 #include "rtld_printf.h" 65 #include "notes.h" 66 67 #ifndef COMPAT_32BIT 68 #define PATH_RTLD "/libexec/ld-elf.so.1" 69 #else 70 #define PATH_RTLD "/libexec/ld-elf32.so.1" 71 #endif 72 73 /* Types. */ 74 typedef void (*func_ptr_type)(); 75 typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg); 76 77 /* 78 * Function declarations. 
79 */ 80 static const char *basename(const char *); 81 static void die(void) __dead2; 82 static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **, 83 const Elf_Dyn **, const Elf_Dyn **); 84 static void digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *, 85 const Elf_Dyn *); 86 static void digest_dynamic(Obj_Entry *, int); 87 static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *); 88 static Obj_Entry *dlcheck(void *); 89 static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj, 90 int lo_flags, int mode, RtldLockState *lockstate); 91 static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int); 92 static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *); 93 static bool donelist_check(DoneList *, const Obj_Entry *); 94 static void errmsg_restore(char *); 95 static char *errmsg_save(void); 96 static void *fill_search_info(const char *, size_t, void *); 97 static char *find_library(const char *, const Obj_Entry *); 98 static const char *gethints(bool); 99 static void init_dag(Obj_Entry *); 100 static void init_rtld(caddr_t, Elf_Auxinfo **); 101 static void initlist_add_neededs(Needed_Entry *, Objlist *); 102 static void initlist_add_objects(Obj_Entry *, Obj_Entry **, Objlist *); 103 static void linkmap_add(Obj_Entry *); 104 static void linkmap_delete(Obj_Entry *); 105 static void load_filtees(Obj_Entry *, int flags, RtldLockState *); 106 static void unload_filtees(Obj_Entry *); 107 static int load_needed_objects(Obj_Entry *, int); 108 static int load_preload_objects(void); 109 static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int); 110 static void map_stacks_exec(RtldLockState *); 111 static Obj_Entry *obj_from_addr(const void *); 112 static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *); 113 static void objlist_call_init(Objlist *, RtldLockState *); 114 static void objlist_clear(Objlist *); 115 static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *); 116 static void objlist_init(Objlist *); 117 static void objlist_push_head(Objlist *, Obj_Entry *); 118 static void objlist_push_tail(Objlist *, Obj_Entry *); 119 static void objlist_put_after(Objlist *, Obj_Entry *, Obj_Entry *); 120 static void objlist_remove(Objlist *, Obj_Entry *); 121 static void *path_enumerate(const char *, path_enum_proc, void *); 122 static int relocate_object_dag(Obj_Entry *root, bool bind_now, 123 Obj_Entry *rtldobj, int flags, RtldLockState *lockstate); 124 static int relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj, 125 int flags, RtldLockState *lockstate); 126 static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, int, 127 RtldLockState *); 128 static int resolve_objects_ifunc(Obj_Entry *first, bool bind_now, 129 int flags, RtldLockState *lockstate); 130 static int rtld_dirname(const char *, char *); 131 static int rtld_dirname_abs(const char *, char *); 132 static void *rtld_dlopen(const char *name, int fd, int mode); 133 static void rtld_exit(void); 134 static char *search_library_path(const char *, const char *); 135 static const void **get_program_var_addr(const char *, RtldLockState *); 136 static void set_program_var(const char *, const void *); 137 static int symlook_default(SymLook *, const Obj_Entry *refobj); 138 static int symlook_global(SymLook *, DoneList *); 139 static void symlook_init_from_req(SymLook *, const SymLook *); 140 static int symlook_list(SymLook *, const Objlist *, DoneList *); 141 static int symlook_needed(SymLook *, const 
Needed_Entry *, DoneList *); 142 static int symlook_obj1_sysv(SymLook *, const Obj_Entry *); 143 static int symlook_obj1_gnu(SymLook *, const Obj_Entry *); 144 static void trace_loaded_objects(Obj_Entry *); 145 static void unlink_object(Obj_Entry *); 146 static void unload_object(Obj_Entry *); 147 static void unref_dag(Obj_Entry *); 148 static void ref_dag(Obj_Entry *); 149 static char *origin_subst_one(char *, const char *, const char *, bool); 150 static char *origin_subst(char *, const char *); 151 static void preinit_main(void); 152 static int rtld_verify_versions(const Objlist *); 153 static int rtld_verify_object_versions(Obj_Entry *); 154 static void object_add_name(Obj_Entry *, const char *); 155 static int object_match_name(const Obj_Entry *, const char *); 156 static void ld_utrace_log(int, void *, void *, size_t, int, const char *); 157 static void rtld_fill_dl_phdr_info(const Obj_Entry *obj, 158 struct dl_phdr_info *phdr_info); 159 static uint32_t gnu_hash(const char *); 160 static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *, 161 const unsigned long); 162 163 void r_debug_state(struct r_debug *, struct link_map *) __noinline; 164 165 /* 166 * Data declarations. 167 */ 168 static char *error_message; /* Message for dlerror(), or NULL */ 169 struct r_debug r_debug; /* for GDB; */ 170 static bool libmap_disable; /* Disable libmap */ 171 static bool ld_loadfltr; /* Immediate filters processing */ 172 static char *libmap_override; /* Maps to use in addition to libmap.conf */ 173 static bool trust; /* False for setuid and setgid programs */ 174 static bool dangerous_ld_env; /* True if environment variables have been 175 used to affect the libraries loaded */ 176 static char *ld_bind_now; /* Environment variable for immediate binding */ 177 static char *ld_debug; /* Environment variable for debugging */ 178 static char *ld_library_path; /* Environment variable for search path */ 179 static char *ld_preload; /* Environment variable for libraries to 180 load first */ 181 static char *ld_elf_hints_path; /* Environment variable for alternative hints path */ 182 static char *ld_tracing; /* Called from ldd to print libs */ 183 static char *ld_utrace; /* Use utrace() to log events. */ 184 static Obj_Entry *obj_list; /* Head of linked list of shared objects */ 185 static Obj_Entry **obj_tail; /* Link field of last object in list */ 186 static Obj_Entry *obj_main; /* The main program shared object */ 187 static Obj_Entry obj_rtld; /* The dynamic linker shared object */ 188 static unsigned int obj_count; /* Number of objects in obj_list */ 189 static unsigned int obj_loads; /* Number of objects in obj_list */ 190 191 static Objlist list_global = /* Objects dlopened with RTLD_GLOBAL */ 192 STAILQ_HEAD_INITIALIZER(list_global); 193 static Objlist list_main = /* Objects loaded at program startup */ 194 STAILQ_HEAD_INITIALIZER(list_main); 195 static Objlist list_fini = /* Objects needing fini() calls */ 196 STAILQ_HEAD_INITIALIZER(list_fini); 197 198 Elf_Sym sym_zero; /* For resolving undefined weak refs. 
*/ 199 200 #define GDB_STATE(s,m) r_debug.r_state = s; r_debug_state(&r_debug,m); 201 202 extern Elf_Dyn _DYNAMIC; 203 #pragma weak _DYNAMIC 204 #ifndef RTLD_IS_DYNAMIC 205 #define RTLD_IS_DYNAMIC() (&_DYNAMIC != NULL) 206 #endif 207 208 int osreldate, pagesize; 209 210 long __stack_chk_guard[8] = {0, 0, 0, 0, 0, 0, 0, 0}; 211 212 static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC; 213 static int max_stack_flags; 214 215 /* 216 * Global declarations normally provided by crt1. The dynamic linker is 217 * not built with crt1, so we have to provide them ourselves. 218 */ 219 char *__progname; 220 char **environ; 221 222 /* 223 * Used to pass argc, argv to init functions. 224 */ 225 int main_argc; 226 char **main_argv; 227 228 /* 229 * Globals to control TLS allocation. 230 */ 231 size_t tls_last_offset; /* Static TLS offset of last module */ 232 size_t tls_last_size; /* Static TLS size of last module */ 233 size_t tls_static_space; /* Static TLS space allocated */ 234 size_t tls_static_max_align; 235 int tls_dtv_generation = 1; /* Used to detect when dtv size changes */ 236 int tls_max_index = 1; /* Largest module index allocated */ 237 238 bool ld_library_path_rpath = false; 239 240 /* 241 * Fill in a DoneList with an allocation large enough to hold all of 242 * the currently-loaded objects. Keep this as a macro since it calls 243 * alloca and we want that to occur within the scope of the caller. 244 */ 245 #define donelist_init(dlp) \ 246 ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]), \ 247 assert((dlp)->objs != NULL), \ 248 (dlp)->num_alloc = obj_count, \ 249 (dlp)->num_used = 0) 250 251 #define UTRACE_DLOPEN_START 1 252 #define UTRACE_DLOPEN_STOP 2 253 #define UTRACE_DLCLOSE_START 3 254 #define UTRACE_DLCLOSE_STOP 4 255 #define UTRACE_LOAD_OBJECT 5 256 #define UTRACE_UNLOAD_OBJECT 6 257 #define UTRACE_ADD_RUNDEP 7 258 #define UTRACE_PRELOAD_FINISHED 8 259 #define UTRACE_INIT_CALL 9 260 #define UTRACE_FINI_CALL 10 261 262 struct utrace_rtld { 263 char sig[4]; /* 'RTLD' */ 264 int event; 265 void *handle; 266 void *mapbase; /* Used for 'parent' and 'init/fini' */ 267 size_t mapsize; 268 int refcnt; /* Used for 'mode' */ 269 char name[MAXPATHLEN]; 270 }; 271 272 #define LD_UTRACE(e, h, mb, ms, r, n) do { \ 273 if (ld_utrace != NULL) \ 274 ld_utrace_log(e, h, mb, ms, r, n); \ 275 } while (0) 276 277 static void 278 ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize, 279 int refcnt, const char *name) 280 { 281 struct utrace_rtld ut; 282 283 ut.sig[0] = 'R'; 284 ut.sig[1] = 'T'; 285 ut.sig[2] = 'L'; 286 ut.sig[3] = 'D'; 287 ut.event = event; 288 ut.handle = handle; 289 ut.mapbase = mapbase; 290 ut.mapsize = mapsize; 291 ut.refcnt = refcnt; 292 bzero(ut.name, sizeof(ut.name)); 293 if (name) 294 strlcpy(ut.name, name, sizeof(ut.name)); 295 utrace(&ut, sizeof(ut)); 296 } 297 298 /* 299 * Main entry point for dynamic linking. The first argument is the 300 * stack pointer. The stack is expected to be laid out as described 301 * in the SVR4 ABI specification, Intel 386 Processor Supplement. 302 * Specifically, the stack pointer points to a word containing 303 * ARGC. Following that in the stack is a null-terminated sequence 304 * of pointers to argument strings. Then comes a null-terminated 305 * sequence of pointers to environment strings. Finally, there is a 306 * sequence of "auxiliary vector" entries. 
307 * 308 * The second argument points to a place to store the dynamic linker's 309 * exit procedure pointer and the third to a place to store the main 310 * program's object. 311 * 312 * The return value is the main program's entry point. 313 */ 314 func_ptr_type 315 _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp) 316 { 317 Elf_Auxinfo *aux_info[AT_COUNT]; 318 int i; 319 int argc; 320 char **argv; 321 char **env; 322 Elf_Auxinfo *aux; 323 Elf_Auxinfo *auxp; 324 const char *argv0; 325 Objlist_Entry *entry; 326 Obj_Entry *obj; 327 Obj_Entry **preload_tail; 328 Obj_Entry *last_interposer; 329 Objlist initlist; 330 RtldLockState lockstate; 331 char *library_path_rpath; 332 int mib[2]; 333 size_t len; 334 335 /* 336 * On entry, the dynamic linker itself has not been relocated yet. 337 * Be very careful not to reference any global data until after 338 * init_rtld has returned. It is OK to reference file-scope statics 339 * and string constants, and to call static and global functions. 340 */ 341 342 /* Find the auxiliary vector on the stack. */ 343 argc = *sp++; 344 argv = (char **) sp; 345 sp += argc + 1; /* Skip over arguments and NULL terminator */ 346 env = (char **) sp; 347 while (*sp++ != 0) /* Skip over environment, and NULL terminator */ 348 ; 349 aux = (Elf_Auxinfo *) sp; 350 351 /* Digest the auxiliary vector. */ 352 for (i = 0; i < AT_COUNT; i++) 353 aux_info[i] = NULL; 354 for (auxp = aux; auxp->a_type != AT_NULL; auxp++) { 355 if (auxp->a_type < AT_COUNT) 356 aux_info[auxp->a_type] = auxp; 357 } 358 359 /* Initialize and relocate ourselves. */ 360 assert(aux_info[AT_BASE] != NULL); 361 init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info); 362 363 __progname = obj_rtld.path; 364 argv0 = argv[0] != NULL ? argv[0] : "(null)"; 365 environ = env; 366 main_argc = argc; 367 main_argv = argv; 368 369 if (aux_info[AT_CANARY] != NULL && 370 aux_info[AT_CANARY]->a_un.a_ptr != NULL) { 371 i = aux_info[AT_CANARYLEN]->a_un.a_val; 372 if (i > sizeof(__stack_chk_guard)) 373 i = sizeof(__stack_chk_guard); 374 memcpy(__stack_chk_guard, aux_info[AT_CANARY]->a_un.a_ptr, i); 375 } else { 376 mib[0] = CTL_KERN; 377 mib[1] = KERN_ARND; 378 379 len = sizeof(__stack_chk_guard); 380 if (sysctl(mib, 2, __stack_chk_guard, &len, NULL, 0) == -1 || 381 len != sizeof(__stack_chk_guard)) { 382 /* If sysctl was unsuccessful, use the "terminator canary". */ 383 ((unsigned char *)(void *)__stack_chk_guard)[0] = 0; 384 ((unsigned char *)(void *)__stack_chk_guard)[1] = 0; 385 ((unsigned char *)(void *)__stack_chk_guard)[2] = '\n'; 386 ((unsigned char *)(void *)__stack_chk_guard)[3] = 255; 387 } 388 } 389 390 trust = !issetugid(); 391 392 ld_bind_now = getenv(LD_ "BIND_NOW"); 393 /* 394 * If the process is tainted, then we un-set the dangerous environment 395 * variables. The process will be marked as tainted until setuid(2) 396 * is called. If any child process calls setuid(2) we do not want any 397 * future processes to honor the potentially un-safe variables. 
398 */ 399 if (!trust) { 400 if (unsetenv(LD_ "PRELOAD") || unsetenv(LD_ "LIBMAP") || 401 unsetenv(LD_ "LIBRARY_PATH") || unsetenv(LD_ "LIBMAP_DISABLE") || 402 unsetenv(LD_ "DEBUG") || unsetenv(LD_ "ELF_HINTS_PATH") || 403 unsetenv(LD_ "LOADFLTR") || unsetenv(LD_ "LIBRARY_PATH_RPATH")) { 404 _rtld_error("environment corrupt; aborting"); 405 die(); 406 } 407 } 408 ld_debug = getenv(LD_ "DEBUG"); 409 libmap_disable = getenv(LD_ "LIBMAP_DISABLE") != NULL; 410 libmap_override = getenv(LD_ "LIBMAP"); 411 ld_library_path = getenv(LD_ "LIBRARY_PATH"); 412 ld_preload = getenv(LD_ "PRELOAD"); 413 ld_elf_hints_path = getenv(LD_ "ELF_HINTS_PATH"); 414 ld_loadfltr = getenv(LD_ "LOADFLTR") != NULL; 415 library_path_rpath = getenv(LD_ "LIBRARY_PATH_RPATH"); 416 if (library_path_rpath != NULL) { 417 if (library_path_rpath[0] == 'y' || 418 library_path_rpath[0] == 'Y' || 419 library_path_rpath[0] == '1') 420 ld_library_path_rpath = true; 421 else 422 ld_library_path_rpath = false; 423 } 424 dangerous_ld_env = libmap_disable || (libmap_override != NULL) || 425 (ld_library_path != NULL) || (ld_preload != NULL) || 426 (ld_elf_hints_path != NULL) || ld_loadfltr; 427 ld_tracing = getenv(LD_ "TRACE_LOADED_OBJECTS"); 428 ld_utrace = getenv(LD_ "UTRACE"); 429 430 if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0) 431 ld_elf_hints_path = _PATH_ELF_HINTS; 432 433 if (ld_debug != NULL && *ld_debug != '\0') 434 debug = 1; 435 dbg("%s is initialized, base address = %p", __progname, 436 (caddr_t) aux_info[AT_BASE]->a_un.a_ptr); 437 dbg("RTLD dynamic = %p", obj_rtld.dynamic); 438 dbg("RTLD pltgot = %p", obj_rtld.pltgot); 439 440 dbg("initializing thread locks"); 441 lockdflt_init(); 442 443 /* 444 * Load the main program, or process its program header if it is 445 * already loaded. 446 */ 447 if (aux_info[AT_EXECFD] != NULL) { /* Load the main program. */ 448 int fd = aux_info[AT_EXECFD]->a_un.a_val; 449 dbg("loading main program"); 450 obj_main = map_object(fd, argv0, NULL); 451 close(fd); 452 if (obj_main == NULL) 453 die(); 454 max_stack_flags = obj->stack_flags; 455 } else { /* Main program already loaded. 
*/ 456 const Elf_Phdr *phdr; 457 int phnum; 458 caddr_t entry; 459 460 dbg("processing main program's program header"); 461 assert(aux_info[AT_PHDR] != NULL); 462 phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr; 463 assert(aux_info[AT_PHNUM] != NULL); 464 phnum = aux_info[AT_PHNUM]->a_un.a_val; 465 assert(aux_info[AT_PHENT] != NULL); 466 assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr)); 467 assert(aux_info[AT_ENTRY] != NULL); 468 entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr; 469 if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL) 470 die(); 471 } 472 473 if (aux_info[AT_EXECPATH] != 0) { 474 char *kexecpath; 475 char buf[MAXPATHLEN]; 476 477 kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr; 478 dbg("AT_EXECPATH %p %s", kexecpath, kexecpath); 479 if (kexecpath[0] == '/') 480 obj_main->path = kexecpath; 481 else if (getcwd(buf, sizeof(buf)) == NULL || 482 strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) || 483 strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf)) 484 obj_main->path = xstrdup(argv0); 485 else 486 obj_main->path = xstrdup(buf); 487 } else { 488 dbg("No AT_EXECPATH"); 489 obj_main->path = xstrdup(argv0); 490 } 491 dbg("obj_main path %s", obj_main->path); 492 obj_main->mainprog = true; 493 494 if (aux_info[AT_STACKPROT] != NULL && 495 aux_info[AT_STACKPROT]->a_un.a_val != 0) 496 stack_prot = aux_info[AT_STACKPROT]->a_un.a_val; 497 498 /* 499 * Get the actual dynamic linker pathname from the executable if 500 * possible. (It should always be possible.) That ensures that 501 * gdb will find the right dynamic linker even if a non-standard 502 * one is being used. 503 */ 504 if (obj_main->interp != NULL && 505 strcmp(obj_main->interp, obj_rtld.path) != 0) { 506 free(obj_rtld.path); 507 obj_rtld.path = xstrdup(obj_main->interp); 508 __progname = obj_rtld.path; 509 } 510 511 digest_dynamic(obj_main, 0); 512 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", 513 obj_main->path, obj_main->valid_hash_sysv, obj_main->valid_hash_gnu, 514 obj_main->dynsymcount); 515 516 linkmap_add(obj_main); 517 linkmap_add(&obj_rtld); 518 519 /* Link the main program into the list of objects. */ 520 *obj_tail = obj_main; 521 obj_tail = &obj_main->next; 522 obj_count++; 523 obj_loads++; 524 525 /* Initialize a fake symbol for resolving undefined weak references. */ 526 sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE); 527 sym_zero.st_shndx = SHN_UNDEF; 528 sym_zero.st_value = -(uintptr_t)obj_main->relocbase; 529 530 if (!libmap_disable) 531 libmap_disable = (bool)lm_init(libmap_override); 532 533 dbg("loading LD_PRELOAD libraries"); 534 if (load_preload_objects() == -1) 535 die(); 536 preload_tail = obj_tail; 537 538 dbg("loading needed objects"); 539 if (load_needed_objects(obj_main, 0) == -1) 540 die(); 541 542 /* Make a list of all objects loaded at startup. 
*/ 543 last_interposer = obj_main; 544 for (obj = obj_list; obj != NULL; obj = obj->next) { 545 if (obj->z_interpose && obj != obj_main) { 546 objlist_put_after(&list_main, last_interposer, obj); 547 last_interposer = obj; 548 } else { 549 objlist_push_tail(&list_main, obj); 550 } 551 obj->refcount++; 552 } 553 554 dbg("checking for required versions"); 555 if (rtld_verify_versions(&list_main) == -1 && !ld_tracing) 556 die(); 557 558 if (ld_tracing) { /* We're done */ 559 trace_loaded_objects(obj_main); 560 exit(0); 561 } 562 563 if (getenv(LD_ "DUMP_REL_PRE") != NULL) { 564 dump_relocations(obj_main); 565 exit (0); 566 } 567 568 /* 569 * Processing tls relocations requires having the tls offsets 570 * initialized. Prepare offsets before starting initial 571 * relocation processing. 572 */ 573 dbg("initializing initial thread local storage offsets"); 574 STAILQ_FOREACH(entry, &list_main, link) { 575 /* 576 * Allocate all the initial objects out of the static TLS 577 * block even if they didn't ask for it. 578 */ 579 allocate_tls_offset(entry->obj); 580 } 581 582 if (relocate_objects(obj_main, 583 ld_bind_now != NULL && *ld_bind_now != '\0', 584 &obj_rtld, SYMLOOK_EARLY, NULL) == -1) 585 die(); 586 587 dbg("doing copy relocations"); 588 if (do_copy_relocations(obj_main) == -1) 589 die(); 590 591 if (getenv(LD_ "DUMP_REL_POST") != NULL) { 592 dump_relocations(obj_main); 593 exit (0); 594 } 595 596 /* 597 * Setup TLS for main thread. This must be done after the 598 * relocations are processed, since tls initialization section 599 * might be the subject for relocations. 600 */ 601 dbg("initializing initial thread local storage"); 602 allocate_initial_tls(obj_list); 603 604 dbg("initializing key program variables"); 605 set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : ""); 606 set_program_var("environ", env); 607 set_program_var("__elf_aux_vector", aux); 608 609 /* Make a list of init functions to call. */ 610 objlist_init(&initlist); 611 initlist_add_objects(obj_list, preload_tail, &initlist); 612 613 r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */ 614 615 map_stacks_exec(NULL); 616 617 dbg("resolving ifuncs"); 618 if (resolve_objects_ifunc(obj_main, 619 ld_bind_now != NULL && *ld_bind_now != '\0', SYMLOOK_EARLY, 620 NULL) == -1) 621 die(); 622 623 if (!obj_main->crt_no_init) { 624 /* 625 * Make sure we don't call the main program's init and fini 626 * functions for binaries linked with old crt1 which calls 627 * _init itself. 628 */ 629 obj_main->init = obj_main->fini = (Elf_Addr)NULL; 630 obj_main->preinit_array = obj_main->init_array = 631 obj_main->fini_array = (Elf_Addr)NULL; 632 } 633 634 wlock_acquire(rtld_bind_lock, &lockstate); 635 if (obj_main->crt_no_init) 636 preinit_main(); 637 objlist_call_init(&initlist, &lockstate); 638 objlist_clear(&initlist); 639 dbg("loading filtees"); 640 for (obj = obj_list->next; obj != NULL; obj = obj->next) { 641 if (ld_loadfltr || obj->z_loadfltr) 642 load_filtees(obj, 0, &lockstate); 643 } 644 lock_release(rtld_bind_lock, &lockstate); 645 646 dbg("transferring control to program entry point = %p", obj_main->entry); 647 648 /* Return the exit procedure and the program entry point. 
*/ 649 *exit_proc = rtld_exit; 650 *objp = obj_main; 651 return (func_ptr_type) obj_main->entry; 652 } 653 654 void * 655 rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def) 656 { 657 void *ptr; 658 Elf_Addr target; 659 660 ptr = (void *)make_function_pointer(def, obj); 661 target = ((Elf_Addr (*)(void))ptr)(); 662 return ((void *)target); 663 } 664 665 Elf_Addr 666 _rtld_bind(Obj_Entry *obj, Elf_Size reloff) 667 { 668 const Elf_Rel *rel; 669 const Elf_Sym *def; 670 const Obj_Entry *defobj; 671 Elf_Addr *where; 672 Elf_Addr target; 673 RtldLockState lockstate; 674 675 rlock_acquire(rtld_bind_lock, &lockstate); 676 if (sigsetjmp(lockstate.env, 0) != 0) 677 lock_upgrade(rtld_bind_lock, &lockstate); 678 if (obj->pltrel) 679 rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff); 680 else 681 rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff); 682 683 where = (Elf_Addr *) (obj->relocbase + rel->r_offset); 684 def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL, 685 &lockstate); 686 if (def == NULL) 687 die(); 688 if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) 689 target = (Elf_Addr)rtld_resolve_ifunc(defobj, def); 690 else 691 target = (Elf_Addr)(defobj->relocbase + def->st_value); 692 693 dbg("\"%s\" in \"%s\" ==> %p in \"%s\"", 694 defobj->strtab + def->st_name, basename(obj->path), 695 (void *)target, basename(defobj->path)); 696 697 /* 698 * Write the new contents for the jmpslot. Note that depending on 699 * architecture, the value which we need to return back to the 700 * lazy binding trampoline may or may not be the target 701 * address. The value returned from reloc_jmpslot() is the value 702 * that the trampoline needs. 703 */ 704 target = reloc_jmpslot(where, target, defobj, obj, rel); 705 lock_release(rtld_bind_lock, &lockstate); 706 return target; 707 } 708 709 /* 710 * Error reporting function. Use it like printf. If formats the message 711 * into a buffer, and sets things up so that the next call to dlerror() 712 * will return the message. 713 */ 714 void 715 _rtld_error(const char *fmt, ...) 716 { 717 static char buf[512]; 718 va_list ap; 719 720 va_start(ap, fmt); 721 rtld_vsnprintf(buf, sizeof buf, fmt, ap); 722 error_message = buf; 723 va_end(ap); 724 } 725 726 /* 727 * Return a dynamically-allocated copy of the current error message, if any. 728 */ 729 static char * 730 errmsg_save(void) 731 { 732 return error_message == NULL ? NULL : xstrdup(error_message); 733 } 734 735 /* 736 * Restore the current error message from a copy which was previously saved 737 * by errmsg_save(). The copy is freed. 738 */ 739 static void 740 errmsg_restore(char *saved_msg) 741 { 742 if (saved_msg == NULL) 743 error_message = NULL; 744 else { 745 _rtld_error("%s", saved_msg); 746 free(saved_msg); 747 } 748 } 749 750 static const char * 751 basename(const char *name) 752 { 753 const char *p = strrchr(name, '/'); 754 return p != NULL ? p + 1 : name; 755 } 756 757 static struct utsname uts; 758 759 static char * 760 origin_subst_one(char *real, const char *kw, const char *subst, 761 bool may_free) 762 { 763 char *p, *p1, *res, *resp; 764 int subst_len, kw_len, subst_count, old_len, new_len; 765 766 kw_len = strlen(kw); 767 768 /* 769 * First, count the number of the keyword occurences, to 770 * preallocate the final string. 771 */ 772 for (p = real, subst_count = 0;; p = p1 + kw_len, subst_count++) { 773 p1 = strstr(p, kw); 774 if (p1 == NULL) 775 break; 776 } 777 778 /* 779 * If the keyword is not found, just return. 
780 */ 781 if (subst_count == 0) 782 return (may_free ? real : xstrdup(real)); 783 784 /* 785 * There is indeed something to substitute. Calculate the 786 * length of the resulting string, and allocate it. 787 */ 788 subst_len = strlen(subst); 789 old_len = strlen(real); 790 new_len = old_len + (subst_len - kw_len) * subst_count; 791 res = xmalloc(new_len + 1); 792 793 /* 794 * Now, execute the substitution loop. 795 */ 796 for (p = real, resp = res, *resp = '\0';;) { 797 p1 = strstr(p, kw); 798 if (p1 != NULL) { 799 /* Copy the prefix before keyword. */ 800 memcpy(resp, p, p1 - p); 801 resp += p1 - p; 802 /* Keyword replacement. */ 803 memcpy(resp, subst, subst_len); 804 resp += subst_len; 805 *resp = '\0'; 806 p = p1 + kw_len; 807 } else 808 break; 809 } 810 811 /* Copy to the end of string and finish. */ 812 strcat(resp, p); 813 if (may_free) 814 free(real); 815 return (res); 816 } 817 818 static char * 819 origin_subst(char *real, const char *origin_path) 820 { 821 char *res1, *res2, *res3, *res4; 822 823 if (uts.sysname[0] == '\0') { 824 if (uname(&uts) != 0) { 825 _rtld_error("utsname failed: %d", errno); 826 return (NULL); 827 } 828 } 829 res1 = origin_subst_one(real, "$ORIGIN", origin_path, false); 830 res2 = origin_subst_one(res1, "$OSNAME", uts.sysname, true); 831 res3 = origin_subst_one(res2, "$OSREL", uts.release, true); 832 res4 = origin_subst_one(res3, "$PLATFORM", uts.machine, true); 833 return (res4); 834 } 835 836 static void 837 die(void) 838 { 839 const char *msg = dlerror(); 840 841 if (msg == NULL) 842 msg = "Fatal error"; 843 rtld_fdputstr(STDERR_FILENO, msg); 844 rtld_fdputchar(STDERR_FILENO, '\n'); 845 _exit(1); 846 } 847 848 /* 849 * Process a shared object's DYNAMIC section, and save the important 850 * information in its Obj_Entry structure. 
851 */ 852 static void 853 digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath, 854 const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath) 855 { 856 const Elf_Dyn *dynp; 857 Needed_Entry **needed_tail = &obj->needed; 858 Needed_Entry **needed_filtees_tail = &obj->needed_filtees; 859 Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees; 860 const Elf_Hashelt *hashtab; 861 const Elf32_Word *hashval; 862 Elf32_Word bkt, nmaskwords; 863 int bloom_size32; 864 bool nmw_power2; 865 int plttype = DT_REL; 866 867 *dyn_rpath = NULL; 868 *dyn_soname = NULL; 869 *dyn_runpath = NULL; 870 871 obj->bind_now = false; 872 for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) { 873 switch (dynp->d_tag) { 874 875 case DT_REL: 876 obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr); 877 break; 878 879 case DT_RELSZ: 880 obj->relsize = dynp->d_un.d_val; 881 break; 882 883 case DT_RELENT: 884 assert(dynp->d_un.d_val == sizeof(Elf_Rel)); 885 break; 886 887 case DT_JMPREL: 888 obj->pltrel = (const Elf_Rel *) 889 (obj->relocbase + dynp->d_un.d_ptr); 890 break; 891 892 case DT_PLTRELSZ: 893 obj->pltrelsize = dynp->d_un.d_val; 894 break; 895 896 case DT_RELA: 897 obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr); 898 break; 899 900 case DT_RELASZ: 901 obj->relasize = dynp->d_un.d_val; 902 break; 903 904 case DT_RELAENT: 905 assert(dynp->d_un.d_val == sizeof(Elf_Rela)); 906 break; 907 908 case DT_PLTREL: 909 plttype = dynp->d_un.d_val; 910 assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA); 911 break; 912 913 case DT_SYMTAB: 914 obj->symtab = (const Elf_Sym *) 915 (obj->relocbase + dynp->d_un.d_ptr); 916 break; 917 918 case DT_SYMENT: 919 assert(dynp->d_un.d_val == sizeof(Elf_Sym)); 920 break; 921 922 case DT_STRTAB: 923 obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr); 924 break; 925 926 case DT_STRSZ: 927 obj->strsize = dynp->d_un.d_val; 928 break; 929 930 case DT_VERNEED: 931 obj->verneed = (const Elf_Verneed *) (obj->relocbase + 932 dynp->d_un.d_val); 933 break; 934 935 case DT_VERNEEDNUM: 936 obj->verneednum = dynp->d_un.d_val; 937 break; 938 939 case DT_VERDEF: 940 obj->verdef = (const Elf_Verdef *) (obj->relocbase + 941 dynp->d_un.d_val); 942 break; 943 944 case DT_VERDEFNUM: 945 obj->verdefnum = dynp->d_un.d_val; 946 break; 947 948 case DT_VERSYM: 949 obj->versyms = (const Elf_Versym *)(obj->relocbase + 950 dynp->d_un.d_val); 951 break; 952 953 case DT_HASH: 954 { 955 hashtab = (const Elf_Hashelt *)(obj->relocbase + 956 dynp->d_un.d_ptr); 957 obj->nbuckets = hashtab[0]; 958 obj->nchains = hashtab[1]; 959 obj->buckets = hashtab + 2; 960 obj->chains = obj->buckets + obj->nbuckets; 961 obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 && 962 obj->buckets != NULL; 963 } 964 break; 965 966 case DT_GNU_HASH: 967 { 968 hashtab = (const Elf_Hashelt *)(obj->relocbase + 969 dynp->d_un.d_ptr); 970 obj->nbuckets_gnu = hashtab[0]; 971 obj->symndx_gnu = hashtab[1]; 972 nmaskwords = hashtab[2]; 973 bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords; 974 /* Number of bitmask words is required to be power of 2 */ 975 nmw_power2 = ((nmaskwords & (nmaskwords - 1)) == 0); 976 obj->maskwords_bm_gnu = nmaskwords - 1; 977 obj->shift2_gnu = hashtab[3]; 978 obj->bloom_gnu = (Elf_Addr *) (hashtab + 4); 979 obj->buckets_gnu = hashtab + 4 + bloom_size32; 980 obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu - 981 obj->symndx_gnu; 982 obj->valid_hash_gnu = nmw_power2 && obj->nbuckets_gnu > 0 && 983 obj->buckets_gnu != NULL; 984 } 
985 break; 986 987 case DT_NEEDED: 988 if (!obj->rtld) { 989 Needed_Entry *nep = NEW(Needed_Entry); 990 nep->name = dynp->d_un.d_val; 991 nep->obj = NULL; 992 nep->next = NULL; 993 994 *needed_tail = nep; 995 needed_tail = &nep->next; 996 } 997 break; 998 999 case DT_FILTER: 1000 if (!obj->rtld) { 1001 Needed_Entry *nep = NEW(Needed_Entry); 1002 nep->name = dynp->d_un.d_val; 1003 nep->obj = NULL; 1004 nep->next = NULL; 1005 1006 *needed_filtees_tail = nep; 1007 needed_filtees_tail = &nep->next; 1008 } 1009 break; 1010 1011 case DT_AUXILIARY: 1012 if (!obj->rtld) { 1013 Needed_Entry *nep = NEW(Needed_Entry); 1014 nep->name = dynp->d_un.d_val; 1015 nep->obj = NULL; 1016 nep->next = NULL; 1017 1018 *needed_aux_filtees_tail = nep; 1019 needed_aux_filtees_tail = &nep->next; 1020 } 1021 break; 1022 1023 case DT_PLTGOT: 1024 obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr); 1025 break; 1026 1027 case DT_TEXTREL: 1028 obj->textrel = true; 1029 break; 1030 1031 case DT_SYMBOLIC: 1032 obj->symbolic = true; 1033 break; 1034 1035 case DT_RPATH: 1036 /* 1037 * We have to wait until later to process this, because we 1038 * might not have gotten the address of the string table yet. 1039 */ 1040 *dyn_rpath = dynp; 1041 break; 1042 1043 case DT_SONAME: 1044 *dyn_soname = dynp; 1045 break; 1046 1047 case DT_RUNPATH: 1048 *dyn_runpath = dynp; 1049 break; 1050 1051 case DT_INIT: 1052 obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr); 1053 break; 1054 1055 case DT_PREINIT_ARRAY: 1056 obj->preinit_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr); 1057 break; 1058 1059 case DT_PREINIT_ARRAYSZ: 1060 obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr); 1061 break; 1062 1063 case DT_INIT_ARRAY: 1064 obj->init_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr); 1065 break; 1066 1067 case DT_INIT_ARRAYSZ: 1068 obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr); 1069 break; 1070 1071 case DT_FINI: 1072 obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr); 1073 break; 1074 1075 case DT_FINI_ARRAY: 1076 obj->fini_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr); 1077 break; 1078 1079 case DT_FINI_ARRAYSZ: 1080 obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr); 1081 break; 1082 1083 /* 1084 * Don't process DT_DEBUG on MIPS as the dynamic section 1085 * is mapped read-only. DT_MIPS_RLD_MAP is used instead. 
1086 */ 1087 1088 #ifndef __mips__ 1089 case DT_DEBUG: 1090 /* XXX - not implemented yet */ 1091 if (!early) 1092 dbg("Filling in DT_DEBUG entry"); 1093 ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug; 1094 break; 1095 #endif 1096 1097 case DT_FLAGS: 1098 if ((dynp->d_un.d_val & DF_ORIGIN) && trust) 1099 obj->z_origin = true; 1100 if (dynp->d_un.d_val & DF_SYMBOLIC) 1101 obj->symbolic = true; 1102 if (dynp->d_un.d_val & DF_TEXTREL) 1103 obj->textrel = true; 1104 if (dynp->d_un.d_val & DF_BIND_NOW) 1105 obj->bind_now = true; 1106 /*if (dynp->d_un.d_val & DF_STATIC_TLS) 1107 ;*/ 1108 break; 1109 #ifdef __mips__ 1110 case DT_MIPS_LOCAL_GOTNO: 1111 obj->local_gotno = dynp->d_un.d_val; 1112 break; 1113 1114 case DT_MIPS_SYMTABNO: 1115 obj->symtabno = dynp->d_un.d_val; 1116 break; 1117 1118 case DT_MIPS_GOTSYM: 1119 obj->gotsym = dynp->d_un.d_val; 1120 break; 1121 1122 case DT_MIPS_RLD_MAP: 1123 *((Elf_Addr *)(dynp->d_un.d_ptr)) = (Elf_Addr) &r_debug; 1124 break; 1125 #endif 1126 1127 case DT_FLAGS_1: 1128 if (dynp->d_un.d_val & DF_1_NOOPEN) 1129 obj->z_noopen = true; 1130 if ((dynp->d_un.d_val & DF_1_ORIGIN) && trust) 1131 obj->z_origin = true; 1132 /*if (dynp->d_un.d_val & DF_1_GLOBAL) 1133 XXX ;*/ 1134 if (dynp->d_un.d_val & DF_1_BIND_NOW) 1135 obj->bind_now = true; 1136 if (dynp->d_un.d_val & DF_1_NODELETE) 1137 obj->z_nodelete = true; 1138 if (dynp->d_un.d_val & DF_1_LOADFLTR) 1139 obj->z_loadfltr = true; 1140 if (dynp->d_un.d_val & DF_1_INTERPOSE) 1141 obj->z_interpose = true; 1142 if (dynp->d_un.d_val & DF_1_NODEFLIB) 1143 obj->z_nodeflib = true; 1144 break; 1145 1146 default: 1147 if (!early) { 1148 dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag, 1149 (long)dynp->d_tag); 1150 } 1151 break; 1152 } 1153 } 1154 1155 obj->traced = false; 1156 1157 if (plttype == DT_RELA) { 1158 obj->pltrela = (const Elf_Rela *) obj->pltrel; 1159 obj->pltrel = NULL; 1160 obj->pltrelasize = obj->pltrelsize; 1161 obj->pltrelsize = 0; 1162 } 1163 1164 /* Determine size of dynsym table (equal to nchains of sysv hash) */ 1165 if (obj->valid_hash_sysv) 1166 obj->dynsymcount = obj->nchains; 1167 else if (obj->valid_hash_gnu) { 1168 obj->dynsymcount = 0; 1169 for (bkt = 0; bkt < obj->nbuckets_gnu; bkt++) { 1170 if (obj->buckets_gnu[bkt] == 0) 1171 continue; 1172 hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]]; 1173 do 1174 obj->dynsymcount++; 1175 while ((*hashval++ & 1u) == 0); 1176 } 1177 obj->dynsymcount += obj->symndx_gnu; 1178 } 1179 } 1180 1181 static void 1182 digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath, 1183 const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath) 1184 { 1185 1186 if (obj->z_origin && obj->origin_path == NULL) { 1187 obj->origin_path = xmalloc(PATH_MAX); 1188 if (rtld_dirname_abs(obj->path, obj->origin_path) == -1) 1189 die(); 1190 } 1191 1192 if (dyn_runpath != NULL) { 1193 obj->runpath = (char *)obj->strtab + dyn_runpath->d_un.d_val; 1194 if (obj->z_origin) 1195 obj->runpath = origin_subst(obj->runpath, obj->origin_path); 1196 } 1197 else if (dyn_rpath != NULL) { 1198 obj->rpath = (char *)obj->strtab + dyn_rpath->d_un.d_val; 1199 if (obj->z_origin) 1200 obj->rpath = origin_subst(obj->rpath, obj->origin_path); 1201 } 1202 1203 if (dyn_soname != NULL) 1204 object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val); 1205 } 1206 1207 static void 1208 digest_dynamic(Obj_Entry *obj, int early) 1209 { 1210 const Elf_Dyn *dyn_rpath; 1211 const Elf_Dyn *dyn_soname; 1212 const Elf_Dyn *dyn_runpath; 1213 1214 digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath); 
1215 digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath); 1216 } 1217 1218 /* 1219 * Process a shared object's program header. This is used only for the 1220 * main program, when the kernel has already loaded the main program 1221 * into memory before calling the dynamic linker. It creates and 1222 * returns an Obj_Entry structure. 1223 */ 1224 static Obj_Entry * 1225 digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path) 1226 { 1227 Obj_Entry *obj; 1228 const Elf_Phdr *phlimit = phdr + phnum; 1229 const Elf_Phdr *ph; 1230 Elf_Addr note_start, note_end; 1231 int nsegs = 0; 1232 1233 obj = obj_new(); 1234 for (ph = phdr; ph < phlimit; ph++) { 1235 if (ph->p_type != PT_PHDR) 1236 continue; 1237 1238 obj->phdr = phdr; 1239 obj->phsize = ph->p_memsz; 1240 obj->relocbase = (caddr_t)phdr - ph->p_vaddr; 1241 break; 1242 } 1243 1244 obj->stack_flags = PF_X | PF_R | PF_W; 1245 1246 for (ph = phdr; ph < phlimit; ph++) { 1247 switch (ph->p_type) { 1248 1249 case PT_INTERP: 1250 obj->interp = (const char *)(ph->p_vaddr + obj->relocbase); 1251 break; 1252 1253 case PT_LOAD: 1254 if (nsegs == 0) { /* First load segment */ 1255 obj->vaddrbase = trunc_page(ph->p_vaddr); 1256 obj->mapbase = obj->vaddrbase + obj->relocbase; 1257 obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) - 1258 obj->vaddrbase; 1259 } else { /* Last load segment */ 1260 obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) - 1261 obj->vaddrbase; 1262 } 1263 nsegs++; 1264 break; 1265 1266 case PT_DYNAMIC: 1267 obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase); 1268 break; 1269 1270 case PT_TLS: 1271 obj->tlsindex = 1; 1272 obj->tlssize = ph->p_memsz; 1273 obj->tlsalign = ph->p_align; 1274 obj->tlsinitsize = ph->p_filesz; 1275 obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase); 1276 break; 1277 1278 case PT_GNU_STACK: 1279 obj->stack_flags = ph->p_flags; 1280 break; 1281 1282 case PT_GNU_RELRO: 1283 obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr); 1284 obj->relro_size = round_page(ph->p_memsz); 1285 break; 1286 1287 case PT_NOTE: 1288 note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr; 1289 note_end = note_start + ph->p_filesz; 1290 digest_notes(obj, note_start, note_end); 1291 break; 1292 } 1293 } 1294 if (nsegs < 1) { 1295 _rtld_error("%s: too few PT_LOAD segments", path); 1296 return NULL; 1297 } 1298 1299 obj->entry = entry; 1300 return obj; 1301 } 1302 1303 void 1304 digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end) 1305 { 1306 const Elf_Note *note; 1307 const char *note_name; 1308 uintptr_t p; 1309 1310 for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end; 1311 note = (const Elf_Note *)((const char *)(note + 1) + 1312 roundup2(note->n_namesz, sizeof(Elf32_Addr)) + 1313 roundup2(note->n_descsz, sizeof(Elf32_Addr)))) { 1314 if (note->n_namesz != sizeof(NOTE_FREEBSD_VENDOR) || 1315 note->n_descsz != sizeof(int32_t)) 1316 continue; 1317 if (note->n_type != ABI_NOTETYPE && 1318 note->n_type != CRT_NOINIT_NOTETYPE) 1319 continue; 1320 note_name = (const char *)(note + 1); 1321 if (strncmp(NOTE_FREEBSD_VENDOR, note_name, 1322 sizeof(NOTE_FREEBSD_VENDOR)) != 0) 1323 continue; 1324 switch (note->n_type) { 1325 case ABI_NOTETYPE: 1326 /* FreeBSD osrel note */ 1327 p = (uintptr_t)(note + 1); 1328 p += roundup2(note->n_namesz, sizeof(Elf32_Addr)); 1329 obj->osrel = *(const int32_t *)(p); 1330 dbg("note osrel %d", obj->osrel); 1331 break; 1332 case CRT_NOINIT_NOTETYPE: 1333 /* FreeBSD 'crt does not call init' note */ 1334 obj->crt_no_init = true; 1335 
dbg("note crt_no_init"); 1336 break; 1337 } 1338 } 1339 } 1340 1341 static Obj_Entry * 1342 dlcheck(void *handle) 1343 { 1344 Obj_Entry *obj; 1345 1346 for (obj = obj_list; obj != NULL; obj = obj->next) 1347 if (obj == (Obj_Entry *) handle) 1348 break; 1349 1350 if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) { 1351 _rtld_error("Invalid shared object handle %p", handle); 1352 return NULL; 1353 } 1354 return obj; 1355 } 1356 1357 /* 1358 * If the given object is already in the donelist, return true. Otherwise 1359 * add the object to the list and return false. 1360 */ 1361 static bool 1362 donelist_check(DoneList *dlp, const Obj_Entry *obj) 1363 { 1364 unsigned int i; 1365 1366 for (i = 0; i < dlp->num_used; i++) 1367 if (dlp->objs[i] == obj) 1368 return true; 1369 /* 1370 * Our donelist allocation should always be sufficient. But if 1371 * our threads locking isn't working properly, more shared objects 1372 * could have been loaded since we allocated the list. That should 1373 * never happen, but we'll handle it properly just in case it does. 1374 */ 1375 if (dlp->num_used < dlp->num_alloc) 1376 dlp->objs[dlp->num_used++] = obj; 1377 return false; 1378 } 1379 1380 /* 1381 * Hash function for symbol table lookup. Don't even think about changing 1382 * this. It is specified by the System V ABI. 1383 */ 1384 unsigned long 1385 elf_hash(const char *name) 1386 { 1387 const unsigned char *p = (const unsigned char *) name; 1388 unsigned long h = 0; 1389 unsigned long g; 1390 1391 while (*p != '\0') { 1392 h = (h << 4) + *p++; 1393 if ((g = h & 0xf0000000) != 0) 1394 h ^= g >> 24; 1395 h &= ~g; 1396 } 1397 return h; 1398 } 1399 1400 /* 1401 * The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits 1402 * unsigned in case it's implemented with a wider type. 1403 */ 1404 static uint32_t 1405 gnu_hash(const char *s) 1406 { 1407 uint32_t h; 1408 unsigned char c; 1409 1410 h = 5381; 1411 for (c = *s; c != '\0'; c = *++s) 1412 h = h * 33 + c; 1413 return (h & 0xffffffff); 1414 } 1415 1416 /* 1417 * Find the library with the given name, and return its full pathname. 1418 * The returned string is dynamically allocated. Generates an error 1419 * message and returns NULL if the library cannot be found. 1420 * 1421 * If the second argument is non-NULL, then it refers to an already- 1422 * loaded shared object, whose library search path will be searched. 1423 * 1424 * The search order is: 1425 * DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1) 1426 * DT_RPATH of the main object if DSO without defined DT_RUNPATH (1) 1427 * LD_LIBRARY_PATH 1428 * DT_RUNPATH in the referencing file 1429 * ldconfig hints (if -z nodefaultlib, filter out default library directories 1430 * from list) 1431 * /lib:/usr/lib _unless_ the referencing file is linked with -z nodefaultlib 1432 * 1433 * (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined. 
1434 */ 1435 static char * 1436 find_library(const char *xname, const Obj_Entry *refobj) 1437 { 1438 char *pathname; 1439 char *name; 1440 bool nodeflib, objgiven; 1441 1442 objgiven = refobj != NULL; 1443 if (strchr(xname, '/') != NULL) { /* Hard coded pathname */ 1444 if (xname[0] != '/' && !trust) { 1445 _rtld_error("Absolute pathname required for shared object \"%s\"", 1446 xname); 1447 return NULL; 1448 } 1449 if (objgiven && refobj->z_origin) { 1450 return (origin_subst(__DECONST(char *, xname), 1451 refobj->origin_path)); 1452 } else { 1453 return (xstrdup(xname)); 1454 } 1455 } 1456 1457 if (libmap_disable || !objgiven || 1458 (name = lm_find(refobj->path, xname)) == NULL) 1459 name = (char *)xname; 1460 1461 dbg(" Searching for \"%s\"", name); 1462 1463 /* 1464 * If refobj->rpath != NULL, then refobj->runpath is NULL. Fall 1465 * back to pre-conforming behaviour if user requested so with 1466 * LD_LIBRARY_PATH_RPATH environment variable and ignore -z 1467 * nodeflib. 1468 */ 1469 if (objgiven && refobj->rpath != NULL && ld_library_path_rpath) { 1470 if ((pathname = search_library_path(name, ld_library_path)) != NULL || 1471 (refobj != NULL && 1472 (pathname = search_library_path(name, refobj->rpath)) != NULL) || 1473 (pathname = search_library_path(name, gethints(false))) != NULL || 1474 (pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL) 1475 return (pathname); 1476 } else { 1477 nodeflib = objgiven ? refobj->z_nodeflib : false; 1478 if ((objgiven && 1479 (pathname = search_library_path(name, refobj->rpath)) != NULL) || 1480 (objgiven && refobj->runpath == NULL && refobj != obj_main && 1481 (pathname = search_library_path(name, obj_main->rpath)) != NULL) || 1482 (pathname = search_library_path(name, ld_library_path)) != NULL || 1483 (objgiven && 1484 (pathname = search_library_path(name, refobj->runpath)) != NULL) || 1485 (pathname = search_library_path(name, gethints(nodeflib))) != NULL || 1486 (objgiven && !nodeflib && 1487 (pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL)) 1488 return (pathname); 1489 } 1490 1491 if (objgiven && refobj->path != NULL) { 1492 _rtld_error("Shared object \"%s\" not found, required by \"%s\"", 1493 name, basename(refobj->path)); 1494 } else { 1495 _rtld_error("Shared object \"%s\" not found", name); 1496 } 1497 return NULL; 1498 } 1499 1500 /* 1501 * Given a symbol number in a referencing object, find the corresponding 1502 * definition of the symbol. Returns a pointer to the symbol, or NULL if 1503 * no definition was found. Returns a pointer to the Obj_Entry of the 1504 * defining object via the reference parameter DEFOBJ_OUT. 1505 */ 1506 const Elf_Sym * 1507 find_symdef(unsigned long symnum, const Obj_Entry *refobj, 1508 const Obj_Entry **defobj_out, int flags, SymCache *cache, 1509 RtldLockState *lockstate) 1510 { 1511 const Elf_Sym *ref; 1512 const Elf_Sym *def; 1513 const Obj_Entry *defobj; 1514 SymLook req; 1515 const char *name; 1516 int res; 1517 1518 /* 1519 * If we have already found this symbol, get the information from 1520 * the cache. 1521 */ 1522 if (symnum >= refobj->dynsymcount) 1523 return NULL; /* Bad object */ 1524 if (cache != NULL && cache[symnum].sym != NULL) { 1525 *defobj_out = cache[symnum].obj; 1526 return cache[symnum].sym; 1527 } 1528 1529 ref = refobj->symtab + symnum; 1530 name = refobj->strtab + ref->st_name; 1531 def = NULL; 1532 defobj = NULL; 1533 1534 /* 1535 * We don't have to do a full scale lookup if the symbol is local. 
1536 * We know it will bind to the instance in this load module; to 1537 * which we already have a pointer (ie ref). By not doing a lookup, 1538 * we not only improve performance, but it also avoids unresolvable 1539 * symbols when local symbols are not in the hash table. This has 1540 * been seen with the ia64 toolchain. 1541 */ 1542 if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) { 1543 if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) { 1544 _rtld_error("%s: Bogus symbol table entry %lu", refobj->path, 1545 symnum); 1546 } 1547 symlook_init(&req, name); 1548 req.flags = flags; 1549 req.ventry = fetch_ventry(refobj, symnum); 1550 req.lockstate = lockstate; 1551 res = symlook_default(&req, refobj); 1552 if (res == 0) { 1553 def = req.sym_out; 1554 defobj = req.defobj_out; 1555 } 1556 } else { 1557 def = ref; 1558 defobj = refobj; 1559 } 1560 1561 /* 1562 * If we found no definition and the reference is weak, treat the 1563 * symbol as having the value zero. 1564 */ 1565 if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) { 1566 def = &sym_zero; 1567 defobj = obj_main; 1568 } 1569 1570 if (def != NULL) { 1571 *defobj_out = defobj; 1572 /* Record the information in the cache to avoid subsequent lookups. */ 1573 if (cache != NULL) { 1574 cache[symnum].sym = def; 1575 cache[symnum].obj = defobj; 1576 } 1577 } else { 1578 if (refobj != &obj_rtld) 1579 _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name); 1580 } 1581 return def; 1582 } 1583 1584 /* 1585 * Return the search path from the ldconfig hints file, reading it if 1586 * necessary. If nostdlib is true, then the default search paths are 1587 * not added to result. 1588 * 1589 * Returns NULL if there are problems with the hints file, 1590 * or if the search path there is empty. 1591 */ 1592 static const char * 1593 gethints(bool nostdlib) 1594 { 1595 static char *hints, *filtered_path; 1596 struct elfhints_hdr hdr; 1597 struct fill_search_info_args sargs, hargs; 1598 struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo; 1599 struct dl_serpath *SLPpath, *hintpath; 1600 char *p; 1601 unsigned int SLPndx, hintndx, fndx, fcount; 1602 int fd; 1603 size_t flen; 1604 bool skip; 1605 1606 /* First call, read the hints file */ 1607 if (hints == NULL) { 1608 /* Keep from trying again in case the hints file is bad. */ 1609 hints = ""; 1610 1611 if ((fd = open(ld_elf_hints_path, O_RDONLY | O_CLOEXEC)) == -1) 1612 return (NULL); 1613 if (read(fd, &hdr, sizeof hdr) != sizeof hdr || 1614 hdr.magic != ELFHINTS_MAGIC || 1615 hdr.version != 1) { 1616 close(fd); 1617 return (NULL); 1618 } 1619 p = xmalloc(hdr.dirlistlen + 1); 1620 if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 || 1621 read(fd, p, hdr.dirlistlen + 1) != 1622 (ssize_t)hdr.dirlistlen + 1) { 1623 free(p); 1624 close(fd); 1625 return (NULL); 1626 } 1627 hints = p; 1628 close(fd); 1629 } 1630 1631 /* 1632 * If caller agreed to receive list which includes the default 1633 * paths, we are done. Otherwise, if we still did not 1634 * calculated filtered result, do it now. 1635 */ 1636 if (!nostdlib) 1637 return (hints[0] != '\0' ? hints : NULL); 1638 if (filtered_path != NULL) 1639 goto filt_ret; 1640 1641 /* 1642 * Obtain the list of all configured search paths, and the 1643 * list of the default paths. 1644 * 1645 * First estimate the size of the results. 
1646 */ 1647 smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath); 1648 smeta.dls_cnt = 0; 1649 hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath); 1650 hmeta.dls_cnt = 0; 1651 1652 sargs.request = RTLD_DI_SERINFOSIZE; 1653 sargs.serinfo = &smeta; 1654 hargs.request = RTLD_DI_SERINFOSIZE; 1655 hargs.serinfo = &hmeta; 1656 1657 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &sargs); 1658 path_enumerate(p, fill_search_info, &hargs); 1659 1660 SLPinfo = xmalloc(smeta.dls_size); 1661 hintinfo = xmalloc(hmeta.dls_size); 1662 1663 /* 1664 * Next fetch both sets of paths. 1665 */ 1666 sargs.request = RTLD_DI_SERINFO; 1667 sargs.serinfo = SLPinfo; 1668 sargs.serpath = &SLPinfo->dls_serpath[0]; 1669 sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt]; 1670 1671 hargs.request = RTLD_DI_SERINFO; 1672 hargs.serinfo = hintinfo; 1673 hargs.serpath = &hintinfo->dls_serpath[0]; 1674 hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt]; 1675 1676 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &sargs); 1677 path_enumerate(p, fill_search_info, &hargs); 1678 1679 /* 1680 * Now calculate the difference between two sets, by excluding 1681 * standard paths from the full set. 1682 */ 1683 fndx = 0; 1684 fcount = 0; 1685 filtered_path = xmalloc(hdr.dirlistlen + 1); 1686 hintpath = &hintinfo->dls_serpath[0]; 1687 for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++, hintpath++) { 1688 skip = false; 1689 SLPpath = &SLPinfo->dls_serpath[0]; 1690 /* 1691 * Check each standard path against current. 1692 */ 1693 for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++, SLPpath++) { 1694 /* matched, skip the path */ 1695 if (!strcmp(hintpath->dls_name, SLPpath->dls_name)) { 1696 skip = true; 1697 break; 1698 } 1699 } 1700 if (skip) 1701 continue; 1702 /* 1703 * Not matched against any standard path, add the path 1704 * to result. Separate consequtive paths with ':'. 1705 */ 1706 if (fcount > 0) { 1707 filtered_path[fndx] = ':'; 1708 fndx++; 1709 } 1710 fcount++; 1711 flen = strlen(hintpath->dls_name); 1712 strncpy((filtered_path + fndx), hintpath->dls_name, flen); 1713 fndx += flen; 1714 } 1715 filtered_path[fndx] = '\0'; 1716 1717 free(SLPinfo); 1718 free(hintinfo); 1719 1720 filt_ret: 1721 return (filtered_path[0] != '\0' ? filtered_path : NULL); 1722 } 1723 1724 static void 1725 init_dag(Obj_Entry *root) 1726 { 1727 const Needed_Entry *needed; 1728 const Objlist_Entry *elm; 1729 DoneList donelist; 1730 1731 if (root->dag_inited) 1732 return; 1733 donelist_init(&donelist); 1734 1735 /* Root object belongs to own DAG. */ 1736 objlist_push_tail(&root->dldags, root); 1737 objlist_push_tail(&root->dagmembers, root); 1738 donelist_check(&donelist, root); 1739 1740 /* 1741 * Add dependencies of root object to DAG in breadth order 1742 * by exploiting the fact that each new object get added 1743 * to the tail of the dagmembers list. 1744 */ 1745 STAILQ_FOREACH(elm, &root->dagmembers, link) { 1746 for (needed = elm->obj->needed; needed != NULL; needed = needed->next) { 1747 if (needed->obj == NULL || donelist_check(&donelist, needed->obj)) 1748 continue; 1749 objlist_push_tail(&needed->obj->dldags, root); 1750 objlist_push_tail(&root->dagmembers, needed->obj); 1751 } 1752 } 1753 root->dag_inited = true; 1754 } 1755 1756 static void 1757 process_nodelete(Obj_Entry *root) 1758 { 1759 const Objlist_Entry *elm; 1760 1761 /* 1762 * Walk over object DAG and process every dependent object that 1763 * is marked as DF_1_NODELETE. 
They need to grow their own DAG, 1764 * which then should have its reference upped separately. 1765 */ 1766 STAILQ_FOREACH(elm, &root->dagmembers, link) { 1767 if (elm->obj != NULL && elm->obj->z_nodelete && 1768 !elm->obj->ref_nodel) { 1769 dbg("obj %s nodelete", elm->obj->path); 1770 init_dag(elm->obj); 1771 ref_dag(elm->obj); 1772 elm->obj->ref_nodel = true; 1773 } 1774 } 1775 } 1776 /* 1777 * Initialize the dynamic linker. The argument is the address at which 1778 * the dynamic linker has been mapped into memory. The primary task of 1779 * this function is to relocate the dynamic linker. 1780 */ 1781 static void 1782 init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info) 1783 { 1784 Obj_Entry objtmp; /* Temporary rtld object */ 1785 const Elf_Dyn *dyn_rpath; 1786 const Elf_Dyn *dyn_soname; 1787 const Elf_Dyn *dyn_runpath; 1788 1789 /* 1790 * Conjure up an Obj_Entry structure for the dynamic linker. 1791 * 1792 * The "path" member can't be initialized yet because string constants 1793 * cannot yet be accessed. Below we will set it correctly. 1794 */ 1795 memset(&objtmp, 0, sizeof(objtmp)); 1796 objtmp.path = NULL; 1797 objtmp.rtld = true; 1798 objtmp.mapbase = mapbase; 1799 #ifdef PIC 1800 objtmp.relocbase = mapbase; 1801 #endif 1802 if (RTLD_IS_DYNAMIC()) { 1803 objtmp.dynamic = rtld_dynamic(&objtmp); 1804 digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath); 1805 assert(objtmp.needed == NULL); 1806 #if !defined(__mips__) 1807 /* MIPS has a bogus DT_TEXTREL. */ 1808 assert(!objtmp.textrel); 1809 #endif 1810 1811 /* 1812 * Temporarily put the dynamic linker entry into the object list, so 1813 * that symbols can be found. 1814 */ 1815 1816 relocate_objects(&objtmp, true, &objtmp, 0, NULL); 1817 } 1818 1819 /* Initialize the object list. */ 1820 obj_tail = &obj_list; 1821 1822 /* Now that non-local variables can be accesses, copy out obj_rtld. */ 1823 memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld)); 1824 1825 if (aux_info[AT_PAGESZ] != NULL) 1826 pagesize = aux_info[AT_PAGESZ]->a_un.a_val; 1827 if (aux_info[AT_OSRELDATE] != NULL) 1828 osreldate = aux_info[AT_OSRELDATE]->a_un.a_val; 1829 1830 digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath); 1831 1832 /* Replace the path with a dynamically allocated copy. */ 1833 obj_rtld.path = xstrdup(PATH_RTLD); 1834 1835 r_debug.r_brk = r_debug_state; 1836 r_debug.r_state = RT_CONSISTENT; 1837 } 1838 1839 /* 1840 * Add the init functions from a needed object list (and its recursive 1841 * needed objects) to "list". This is not used directly; it is a helper 1842 * function for initlist_add_objects(). The write lock must be held 1843 * when this function is called. 1844 */ 1845 static void 1846 initlist_add_neededs(Needed_Entry *needed, Objlist *list) 1847 { 1848 /* Recursively process the successor needed objects. */ 1849 if (needed->next != NULL) 1850 initlist_add_neededs(needed->next, list); 1851 1852 /* Process the current needed object. */ 1853 if (needed->obj != NULL) 1854 initlist_add_objects(needed->obj, &needed->obj->next, list); 1855 } 1856 1857 /* 1858 * Scan all of the DAGs rooted in the range of objects from "obj" to 1859 * "tail" and add their init functions to "list". This recurses over 1860 * the DAGs and ensure the proper init ordering such that each object's 1861 * needed libraries are initialized before the object itself. At the 1862 * same time, this function adds the objects to the global finalization 1863 * list "list_fini" in the opposite order. The write lock must be 1864 * held when this function is called. 
1865 */ 1866 static void 1867 initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail, Objlist *list) 1868 { 1869 1870 if (obj->init_scanned || obj->init_done) 1871 return; 1872 obj->init_scanned = true; 1873 1874 /* Recursively process the successor objects. */ 1875 if (&obj->next != tail) 1876 initlist_add_objects(obj->next, tail, list); 1877 1878 /* Recursively process the needed objects. */ 1879 if (obj->needed != NULL) 1880 initlist_add_neededs(obj->needed, list); 1881 if (obj->needed_filtees != NULL) 1882 initlist_add_neededs(obj->needed_filtees, list); 1883 if (obj->needed_aux_filtees != NULL) 1884 initlist_add_neededs(obj->needed_aux_filtees, list); 1885 1886 /* Add the object to the init list. */ 1887 if (obj->preinit_array != (Elf_Addr)NULL || obj->init != (Elf_Addr)NULL || 1888 obj->init_array != (Elf_Addr)NULL) 1889 objlist_push_tail(list, obj); 1890 1891 /* Add the object to the global fini list in the reverse order. */ 1892 if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL) 1893 && !obj->on_fini_list) { 1894 objlist_push_head(&list_fini, obj); 1895 obj->on_fini_list = true; 1896 } 1897 } 1898 1899 #ifndef FPTR_TARGET 1900 #define FPTR_TARGET(f) ((Elf_Addr) (f)) 1901 #endif 1902 1903 static void 1904 free_needed_filtees(Needed_Entry *n) 1905 { 1906 Needed_Entry *needed, *needed1; 1907 1908 for (needed = n; needed != NULL; needed = needed->next) { 1909 if (needed->obj != NULL) { 1910 dlclose(needed->obj); 1911 needed->obj = NULL; 1912 } 1913 } 1914 for (needed = n; needed != NULL; needed = needed1) { 1915 needed1 = needed->next; 1916 free(needed); 1917 } 1918 } 1919 1920 static void 1921 unload_filtees(Obj_Entry *obj) 1922 { 1923 1924 free_needed_filtees(obj->needed_filtees); 1925 obj->needed_filtees = NULL; 1926 free_needed_filtees(obj->needed_aux_filtees); 1927 obj->needed_aux_filtees = NULL; 1928 obj->filtees_loaded = false; 1929 } 1930 1931 static void 1932 load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags, 1933 RtldLockState *lockstate) 1934 { 1935 1936 for (; needed != NULL; needed = needed->next) { 1937 needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj, 1938 flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) | 1939 RTLD_LOCAL, lockstate); 1940 } 1941 } 1942 1943 static void 1944 load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate) 1945 { 1946 1947 lock_restart_for_upgrade(lockstate); 1948 if (!obj->filtees_loaded) { 1949 load_filtee1(obj, obj->needed_filtees, flags, lockstate); 1950 load_filtee1(obj, obj->needed_aux_filtees, flags, lockstate); 1951 obj->filtees_loaded = true; 1952 } 1953 } 1954 1955 static int 1956 process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags) 1957 { 1958 Obj_Entry *obj1; 1959 1960 for (; needed != NULL; needed = needed->next) { 1961 obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj, 1962 flags & ~RTLD_LO_NOLOAD); 1963 if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0) 1964 return (-1); 1965 } 1966 return (0); 1967 } 1968 1969 /* 1970 * Given a shared object, traverse its list of needed objects, and load 1971 * each of them. Returns 0 on success. Generates an error message and 1972 * returns -1 on failure. 
1973 */
1974 static int
1975 load_needed_objects(Obj_Entry *first, int flags)
1976 {
1977 Obj_Entry *obj;
1978
1979 for (obj = first; obj != NULL; obj = obj->next) {
1980 if (process_needed(obj, obj->needed, flags) == -1)
1981 return (-1);
1982 }
1983 return (0);
1984 }
1985
1986 static int
1987 load_preload_objects(void)
1988 {
1989 char *p = ld_preload;
1990 Obj_Entry *obj;
1991 static const char delim[] = " \t:;";
1992
1993 if (p == NULL)
1994 return 0;
1995
1996 p += strspn(p, delim);
1997 while (*p != '\0') {
1998 size_t len = strcspn(p, delim);
1999 char savech;
2000
2001 savech = p[len];
2002 p[len] = '\0';
2003 obj = load_object(p, -1, NULL, 0);
2004 if (obj == NULL)
2005 return -1; /* XXX - cleanup */
2006 obj->z_interpose = true;
2007 p[len] = savech;
2008 p += len;
2009 p += strspn(p, delim);
2010 }
2011 LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL);
2012 return 0;
2013 }
2014
2015 static const char *
2016 printable_path(const char *path)
2017 {
2018
2019 return (path == NULL ? "<unknown>" : path);
2020 }
2021
2022 /*
2023 * Load a shared object into memory, if it is not already loaded. The
2024 * object may be specified by name or by a user-supplied file descriptor
2025 * fd_u. In the latter case, the fd_u descriptor is not closed, but its
2026 * duplicate is.
2027 *
2028 * Returns a pointer to the Obj_Entry for the object. Returns NULL
2029 * on failure.
2030 */
2031 static Obj_Entry *
2032 load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags)
2033 {
2034 Obj_Entry *obj;
2035 int fd;
2036 struct stat sb;
2037 char *path;
2038
2039 if (name != NULL) {
2040 for (obj = obj_list->next; obj != NULL; obj = obj->next) {
2041 if (object_match_name(obj, name))
2042 return (obj);
2043 }
2044
2045 path = find_library(name, refobj);
2046 if (path == NULL)
2047 return (NULL);
2048 } else
2049 path = NULL;
2050
2051 /*
2052 * If we didn't find a match by pathname, or the name is not
2053 * supplied, open the file and check again by device and inode.
2054 * This avoids false mismatches caused by multiple links or ".."
2055 * in pathnames.
2056 *
2057 * To avoid a race, we open the file and use fstat() rather than
2058 * using stat().
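 * A stat() on the pathname followed by a later open() could race with
 * a rename or unlink of the path; fstat() on the descriptor we just
 * opened describes exactly the file that will be mapped.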
2059 */ 2060 fd = -1; 2061 if (fd_u == -1) { 2062 if ((fd = open(path, O_RDONLY | O_CLOEXEC)) == -1) { 2063 _rtld_error("Cannot open \"%s\"", path); 2064 free(path); 2065 return (NULL); 2066 } 2067 } else { 2068 fd = fcntl(fd_u, F_DUPFD_CLOEXEC, 0); 2069 if (fd == -1) { 2070 _rtld_error("Cannot dup fd"); 2071 free(path); 2072 return (NULL); 2073 } 2074 } 2075 if (fstat(fd, &sb) == -1) { 2076 _rtld_error("Cannot fstat \"%s\"", printable_path(path)); 2077 close(fd); 2078 free(path); 2079 return NULL; 2080 } 2081 for (obj = obj_list->next; obj != NULL; obj = obj->next) 2082 if (obj->ino == sb.st_ino && obj->dev == sb.st_dev) 2083 break; 2084 if (obj != NULL && name != NULL) { 2085 object_add_name(obj, name); 2086 free(path); 2087 close(fd); 2088 return obj; 2089 } 2090 if (flags & RTLD_LO_NOLOAD) { 2091 free(path); 2092 close(fd); 2093 return (NULL); 2094 } 2095 2096 /* First use of this object, so we must map it in */ 2097 obj = do_load_object(fd, name, path, &sb, flags); 2098 if (obj == NULL) 2099 free(path); 2100 close(fd); 2101 2102 return obj; 2103 } 2104 2105 static Obj_Entry * 2106 do_load_object(int fd, const char *name, char *path, struct stat *sbp, 2107 int flags) 2108 { 2109 Obj_Entry *obj; 2110 struct statfs fs; 2111 2112 /* 2113 * but first, make sure that environment variables haven't been 2114 * used to circumvent the noexec flag on a filesystem. 2115 */ 2116 if (dangerous_ld_env) { 2117 if (fstatfs(fd, &fs) != 0) { 2118 _rtld_error("Cannot fstatfs \"%s\"", printable_path(path)); 2119 return NULL; 2120 } 2121 if (fs.f_flags & MNT_NOEXEC) { 2122 _rtld_error("Cannot execute objects on %s\n", fs.f_mntonname); 2123 return NULL; 2124 } 2125 } 2126 dbg("loading \"%s\"", printable_path(path)); 2127 obj = map_object(fd, printable_path(path), sbp); 2128 if (obj == NULL) 2129 return NULL; 2130 2131 /* 2132 * If DT_SONAME is present in the object, digest_dynamic2 already 2133 * added it to the object names. 2134 */ 2135 if (name != NULL) 2136 object_add_name(obj, name); 2137 obj->path = path; 2138 digest_dynamic(obj, 0); 2139 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", obj->path, 2140 obj->valid_hash_sysv, obj->valid_hash_gnu, obj->dynsymcount); 2141 if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) == 2142 RTLD_LO_DLOPEN) { 2143 dbg("refusing to load non-loadable \"%s\"", obj->path); 2144 _rtld_error("Cannot dlopen non-loadable %s", obj->path); 2145 munmap(obj->mapbase, obj->mapsize); 2146 obj_free(obj); 2147 return (NULL); 2148 } 2149 2150 *obj_tail = obj; 2151 obj_tail = &obj->next; 2152 obj_count++; 2153 obj_loads++; 2154 linkmap_add(obj); /* for GDB & dlinfo() */ 2155 max_stack_flags |= obj->stack_flags; 2156 2157 dbg(" %p .. 
%p: %s", obj->mapbase, 2158 obj->mapbase + obj->mapsize - 1, obj->path); 2159 if (obj->textrel) 2160 dbg(" WARNING: %s has impure text", obj->path); 2161 LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0, 2162 obj->path); 2163 2164 return obj; 2165 } 2166 2167 static Obj_Entry * 2168 obj_from_addr(const void *addr) 2169 { 2170 Obj_Entry *obj; 2171 2172 for (obj = obj_list; obj != NULL; obj = obj->next) { 2173 if (addr < (void *) obj->mapbase) 2174 continue; 2175 if (addr < (void *) (obj->mapbase + obj->mapsize)) 2176 return obj; 2177 } 2178 return NULL; 2179 } 2180 2181 static void 2182 preinit_main(void) 2183 { 2184 Elf_Addr *preinit_addr; 2185 int index; 2186 2187 preinit_addr = (Elf_Addr *)obj_main->preinit_array; 2188 if (preinit_addr == NULL) 2189 return; 2190 2191 for (index = 0; index < obj_main->preinit_array_num; index++) { 2192 if (preinit_addr[index] != 0 && preinit_addr[index] != 1) { 2193 dbg("calling preinit function for %s at %p", obj_main->path, 2194 (void *)preinit_addr[index]); 2195 LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index], 2196 0, 0, obj_main->path); 2197 call_init_pointer(obj_main, preinit_addr[index]); 2198 } 2199 } 2200 } 2201 2202 /* 2203 * Call the finalization functions for each of the objects in "list" 2204 * belonging to the DAG of "root" and referenced once. If NULL "root" 2205 * is specified, every finalization function will be called regardless 2206 * of the reference count and the list elements won't be freed. All of 2207 * the objects are expected to have non-NULL fini functions. 2208 */ 2209 static void 2210 objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate) 2211 { 2212 Objlist_Entry *elm; 2213 char *saved_msg; 2214 Elf_Addr *fini_addr; 2215 int index; 2216 2217 assert(root == NULL || root->refcount == 1); 2218 2219 /* 2220 * Preserve the current error message since a fini function might 2221 * call into the dynamic linker and overwrite it. 2222 */ 2223 saved_msg = errmsg_save(); 2224 do { 2225 STAILQ_FOREACH(elm, list, link) { 2226 if (root != NULL && (elm->obj->refcount != 1 || 2227 objlist_find(&root->dagmembers, elm->obj) == NULL)) 2228 continue; 2229 /* Remove object from fini list to prevent recursive invocation. */ 2230 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link); 2231 /* 2232 * XXX: If a dlopen() call references an object while the 2233 * fini function is in progress, we might end up trying to 2234 * unload the referenced object in dlclose() or the object 2235 * won't be unloaded although its fini function has been 2236 * called. 2237 */ 2238 lock_release(rtld_bind_lock, lockstate); 2239 2240 /* 2241 * It is legal to have both DT_FINI and DT_FINI_ARRAY defined. 2242 * When this happens, DT_FINI_ARRAY is processed first. 
2243 */
2244 fini_addr = (Elf_Addr *)elm->obj->fini_array;
2245 if (fini_addr != NULL && elm->obj->fini_array_num > 0) {
2246 for (index = elm->obj->fini_array_num - 1; index >= 0;
2247 index--) {
2248 if (fini_addr[index] != 0 && fini_addr[index] != 1) {
2249 dbg("calling fini function for %s at %p",
2250 elm->obj->path, (void *)fini_addr[index]);
2251 LD_UTRACE(UTRACE_FINI_CALL, elm->obj,
2252 (void *)fini_addr[index], 0, 0, elm->obj->path);
2253 call_initfini_pointer(elm->obj, fini_addr[index]);
2254 }
2255 }
2256 }
2257 if (elm->obj->fini != (Elf_Addr)NULL) {
2258 dbg("calling fini function for %s at %p", elm->obj->path,
2259 (void *)elm->obj->fini);
2260 LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini,
2261 0, 0, elm->obj->path);
2262 call_initfini_pointer(elm->obj, elm->obj->fini);
2263 }
2264 wlock_acquire(rtld_bind_lock, lockstate);
2265 /* No need to free anything if the process is going down. */
2266 if (root != NULL)
2267 free(elm);
2268 /*
2269 * We must restart the list traversal after every fini call
2270 * because a dlclose() call from the fini function or from
2271 * another thread might have modified the reference counts.
2272 */
2273 break;
2274 }
2275 } while (elm != NULL);
2276 errmsg_restore(saved_msg);
2277 }
2278
2279 /*
2280 * Call the initialization functions for each of the objects in
2281 * "list". All of the objects are expected to have non-NULL init
2282 * functions.
2283 */
2284 static void
2285 objlist_call_init(Objlist *list, RtldLockState *lockstate)
2286 {
2287 Objlist_Entry *elm;
2288 Obj_Entry *obj;
2289 char *saved_msg;
2290 Elf_Addr *init_addr;
2291 int index;
2292
2293 /*
2294 * Clear the init_scanned flag so that objects can be rechecked and
2295 * possibly initialized earlier if any of the init vectors called
2296 * below cause that by using dlopen.
2297 */
2298 for (obj = obj_list; obj != NULL; obj = obj->next)
2299 obj->init_scanned = false;
2300
2301 /*
2302 * Preserve the current error message since an init function might
2303 * call into the dynamic linker and overwrite it.
2304 */
2305 saved_msg = errmsg_save();
2306 STAILQ_FOREACH(elm, list, link) {
2307 if (elm->obj->init_done) /* Initialized early. */
2308 continue;
2309 /*
2310 * Race: another thread might try to use this object before the
2311 * current one completes its initialization. Not much can be done
2312 * here without better locking.
2313 */
2314 elm->obj->init_done = true;
2315 lock_release(rtld_bind_lock, lockstate);
2316
2317 /*
2318 * It is legal to have both DT_INIT and DT_INIT_ARRAY defined.
2319 * When this happens, DT_INIT is processed first.
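 * The DT_INIT_ARRAY entries are then invoked in forward order,
 * again skipping the sentinel values 0 and 1.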
2320 */ 2321 if (elm->obj->init != (Elf_Addr)NULL) { 2322 dbg("calling init function for %s at %p", elm->obj->path, 2323 (void *)elm->obj->init); 2324 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init, 2325 0, 0, elm->obj->path); 2326 call_initfini_pointer(elm->obj, elm->obj->init); 2327 } 2328 init_addr = (Elf_Addr *)elm->obj->init_array; 2329 if (init_addr != NULL) { 2330 for (index = 0; index < elm->obj->init_array_num; index++) { 2331 if (init_addr[index] != 0 && init_addr[index] != 1) { 2332 dbg("calling init function for %s at %p", elm->obj->path, 2333 (void *)init_addr[index]); 2334 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, 2335 (void *)init_addr[index], 0, 0, elm->obj->path); 2336 call_init_pointer(elm->obj, init_addr[index]); 2337 } 2338 } 2339 } 2340 wlock_acquire(rtld_bind_lock, lockstate); 2341 } 2342 errmsg_restore(saved_msg); 2343 } 2344 2345 static void 2346 objlist_clear(Objlist *list) 2347 { 2348 Objlist_Entry *elm; 2349 2350 while (!STAILQ_EMPTY(list)) { 2351 elm = STAILQ_FIRST(list); 2352 STAILQ_REMOVE_HEAD(list, link); 2353 free(elm); 2354 } 2355 } 2356 2357 static Objlist_Entry * 2358 objlist_find(Objlist *list, const Obj_Entry *obj) 2359 { 2360 Objlist_Entry *elm; 2361 2362 STAILQ_FOREACH(elm, list, link) 2363 if (elm->obj == obj) 2364 return elm; 2365 return NULL; 2366 } 2367 2368 static void 2369 objlist_init(Objlist *list) 2370 { 2371 STAILQ_INIT(list); 2372 } 2373 2374 static void 2375 objlist_push_head(Objlist *list, Obj_Entry *obj) 2376 { 2377 Objlist_Entry *elm; 2378 2379 elm = NEW(Objlist_Entry); 2380 elm->obj = obj; 2381 STAILQ_INSERT_HEAD(list, elm, link); 2382 } 2383 2384 static void 2385 objlist_push_tail(Objlist *list, Obj_Entry *obj) 2386 { 2387 Objlist_Entry *elm; 2388 2389 elm = NEW(Objlist_Entry); 2390 elm->obj = obj; 2391 STAILQ_INSERT_TAIL(list, elm, link); 2392 } 2393 2394 static void 2395 objlist_put_after(Objlist *list, Obj_Entry *listobj, Obj_Entry *obj) 2396 { 2397 Objlist_Entry *elm, *listelm; 2398 2399 STAILQ_FOREACH(listelm, list, link) { 2400 if (listelm->obj == listobj) 2401 break; 2402 } 2403 elm = NEW(Objlist_Entry); 2404 elm->obj = obj; 2405 if (listelm != NULL) 2406 STAILQ_INSERT_AFTER(list, listelm, elm, link); 2407 else 2408 STAILQ_INSERT_TAIL(list, elm, link); 2409 } 2410 2411 static void 2412 objlist_remove(Objlist *list, Obj_Entry *obj) 2413 { 2414 Objlist_Entry *elm; 2415 2416 if ((elm = objlist_find(list, obj)) != NULL) { 2417 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link); 2418 free(elm); 2419 } 2420 } 2421 2422 /* 2423 * Relocate dag rooted in the specified object. 2424 * Returns 0 on success, or -1 on failure. 2425 */ 2426 2427 static int 2428 relocate_object_dag(Obj_Entry *root, bool bind_now, Obj_Entry *rtldobj, 2429 int flags, RtldLockState *lockstate) 2430 { 2431 Objlist_Entry *elm; 2432 int error; 2433 2434 error = 0; 2435 STAILQ_FOREACH(elm, &root->dagmembers, link) { 2436 error = relocate_object(elm->obj, bind_now, rtldobj, flags, 2437 lockstate); 2438 if (error == -1) 2439 break; 2440 } 2441 return (error); 2442 } 2443 2444 /* 2445 * Relocate single object. 2446 * Returns 0 on success, or -1 on failure. 
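 * Roughly, the passes below are: write-enable the text segment if
 * DT_TEXTREL demands it, apply the non-PLT relocations, restore the
 * text protection, set up the special PLT/GOT entries, process the
 * PLT relocations (resolving the jump slots eagerly when binding
 * now), and finally write-protect any PT_GNU_RELRO region.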
2447 */ 2448 static int 2449 relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj, 2450 int flags, RtldLockState *lockstate) 2451 { 2452 2453 if (obj->relocated) 2454 return (0); 2455 obj->relocated = true; 2456 if (obj != rtldobj) 2457 dbg("relocating \"%s\"", obj->path); 2458 2459 if (obj->symtab == NULL || obj->strtab == NULL || 2460 !(obj->valid_hash_sysv || obj->valid_hash_gnu)) { 2461 _rtld_error("%s: Shared object has no run-time symbol table", 2462 obj->path); 2463 return (-1); 2464 } 2465 2466 if (obj->textrel) { 2467 /* There are relocations to the write-protected text segment. */ 2468 if (mprotect(obj->mapbase, obj->textsize, 2469 PROT_READ|PROT_WRITE|PROT_EXEC) == -1) { 2470 _rtld_error("%s: Cannot write-enable text segment: %s", 2471 obj->path, rtld_strerror(errno)); 2472 return (-1); 2473 } 2474 } 2475 2476 /* Process the non-PLT relocations. */ 2477 if (reloc_non_plt(obj, rtldobj, flags, lockstate)) 2478 return (-1); 2479 2480 if (obj->textrel) { /* Re-protected the text segment. */ 2481 if (mprotect(obj->mapbase, obj->textsize, 2482 PROT_READ|PROT_EXEC) == -1) { 2483 _rtld_error("%s: Cannot write-protect text segment: %s", 2484 obj->path, rtld_strerror(errno)); 2485 return (-1); 2486 } 2487 } 2488 2489 2490 /* Set the special PLT or GOT entries. */ 2491 init_pltgot(obj); 2492 2493 /* Process the PLT relocations. */ 2494 if (reloc_plt(obj) == -1) 2495 return (-1); 2496 /* Relocate the jump slots if we are doing immediate binding. */ 2497 if (obj->bind_now || bind_now) 2498 if (reloc_jmpslots(obj, flags, lockstate) == -1) 2499 return (-1); 2500 2501 if (obj->relro_size > 0) { 2502 if (mprotect(obj->relro_page, obj->relro_size, 2503 PROT_READ) == -1) { 2504 _rtld_error("%s: Cannot enforce relro protection: %s", 2505 obj->path, rtld_strerror(errno)); 2506 return (-1); 2507 } 2508 } 2509 2510 /* 2511 * Set up the magic number and version in the Obj_Entry. These 2512 * were checked in the crt1.o from the original ElfKit, so we 2513 * set them for backward compatibility. 2514 */ 2515 obj->magic = RTLD_MAGIC; 2516 obj->version = RTLD_VERSION; 2517 2518 return (0); 2519 } 2520 2521 /* 2522 * Relocate newly-loaded shared objects. The argument is a pointer to 2523 * the Obj_Entry for the first such object. All objects from the first 2524 * to the end of the list of objects are relocated. Returns 0 on success, 2525 * or -1 on failure. 2526 */ 2527 static int 2528 relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj, 2529 int flags, RtldLockState *lockstate) 2530 { 2531 Obj_Entry *obj; 2532 int error; 2533 2534 for (error = 0, obj = first; obj != NULL; obj = obj->next) { 2535 error = relocate_object(obj, bind_now, rtldobj, flags, 2536 lockstate); 2537 if (error == -1) 2538 break; 2539 } 2540 return (error); 2541 } 2542 2543 /* 2544 * The handling of R_MACHINE_IRELATIVE relocations and jumpslots 2545 * referencing STT_GNU_IFUNC symbols is postponed till the other 2546 * relocations are done. The indirect functions specified as 2547 * ifunc are allowed to call other symbols, so we need to have 2548 * objects relocated before asking for resolution from indirects. 2549 * 2550 * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion, 2551 * instead of the usual lazy handling of PLT slots. It is 2552 * consistent with how GNU does it. 
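 *
 * As a sketch (application code, not part of rtld; the helper
 * have_fast_cpu() and both implementations are hypothetical), an
 * ifunc of the form
 *
 *	static int impl_fast(void);
 *	static int impl_generic(void);
 *	static int (*resolve_foo(void))(void)
 *	{
 *		return (have_fast_cpu() ? impl_fast : impl_generic);
 *	}
 *	int foo(void) __attribute__((ifunc("resolve_foo")));
 *
 * runs its resolver at relocation time, and the resolver may call
 * into already-loaded objects, which is why this pass is deferred
 * until the ordinary relocations are complete.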
2553 */ 2554 static int 2555 resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags, 2556 RtldLockState *lockstate) 2557 { 2558 if (obj->irelative && reloc_iresolve(obj, lockstate) == -1) 2559 return (-1); 2560 if ((obj->bind_now || bind_now) && obj->gnu_ifunc && 2561 reloc_gnu_ifunc(obj, flags, lockstate) == -1) 2562 return (-1); 2563 return (0); 2564 } 2565 2566 static int 2567 resolve_objects_ifunc(Obj_Entry *first, bool bind_now, int flags, 2568 RtldLockState *lockstate) 2569 { 2570 Obj_Entry *obj; 2571 2572 for (obj = first; obj != NULL; obj = obj->next) { 2573 if (resolve_object_ifunc(obj, bind_now, flags, lockstate) == -1) 2574 return (-1); 2575 } 2576 return (0); 2577 } 2578 2579 static int 2580 initlist_objects_ifunc(Objlist *list, bool bind_now, int flags, 2581 RtldLockState *lockstate) 2582 { 2583 Objlist_Entry *elm; 2584 2585 STAILQ_FOREACH(elm, list, link) { 2586 if (resolve_object_ifunc(elm->obj, bind_now, flags, 2587 lockstate) == -1) 2588 return (-1); 2589 } 2590 return (0); 2591 } 2592 2593 /* 2594 * Cleanup procedure. It will be called (by the atexit mechanism) just 2595 * before the process exits. 2596 */ 2597 static void 2598 rtld_exit(void) 2599 { 2600 RtldLockState lockstate; 2601 2602 wlock_acquire(rtld_bind_lock, &lockstate); 2603 dbg("rtld_exit()"); 2604 objlist_call_fini(&list_fini, NULL, &lockstate); 2605 /* No need to remove the items from the list, since we are exiting. */ 2606 if (!libmap_disable) 2607 lm_fini(); 2608 lock_release(rtld_bind_lock, &lockstate); 2609 } 2610 2611 /* 2612 * Iterate over a search path, translate each element, and invoke the 2613 * callback on the result. 2614 */ 2615 static void * 2616 path_enumerate(const char *path, path_enum_proc callback, void *arg) 2617 { 2618 const char *trans; 2619 if (path == NULL) 2620 return (NULL); 2621 2622 path += strspn(path, ":;"); 2623 while (*path != '\0') { 2624 size_t len; 2625 char *res; 2626 2627 len = strcspn(path, ":;"); 2628 trans = lm_findn(NULL, path, len); 2629 if (trans) 2630 res = callback(trans, strlen(trans), arg); 2631 else 2632 res = callback(path, len, arg); 2633 2634 if (res != NULL) 2635 return (res); 2636 2637 path += len; 2638 path += strspn(path, ":;"); 2639 } 2640 2641 return (NULL); 2642 } 2643 2644 struct try_library_args { 2645 const char *name; 2646 size_t namelen; 2647 char *buffer; 2648 size_t buflen; 2649 }; 2650 2651 static void * 2652 try_library_path(const char *dir, size_t dirlen, void *param) 2653 { 2654 struct try_library_args *arg; 2655 2656 arg = param; 2657 if (*dir == '/' || trust) { 2658 char *pathname; 2659 2660 if (dirlen + 1 + arg->namelen + 1 > arg->buflen) 2661 return (NULL); 2662 2663 pathname = arg->buffer; 2664 strncpy(pathname, dir, dirlen); 2665 pathname[dirlen] = '/'; 2666 strcpy(pathname + dirlen + 1, arg->name); 2667 2668 dbg(" Trying \"%s\"", pathname); 2669 if (access(pathname, F_OK) == 0) { /* We found it */ 2670 pathname = xmalloc(dirlen + 1 + arg->namelen + 1); 2671 strcpy(pathname, arg->buffer); 2672 return (pathname); 2673 } 2674 } 2675 return (NULL); 2676 } 2677 2678 static char * 2679 search_library_path(const char *name, const char *path) 2680 { 2681 char *p; 2682 struct try_library_args arg; 2683 2684 if (path == NULL) 2685 return NULL; 2686 2687 arg.name = name; 2688 arg.namelen = strlen(name); 2689 arg.buffer = xmalloc(PATH_MAX); 2690 arg.buflen = PATH_MAX; 2691 2692 p = path_enumerate(path, try_library_path, &arg); 2693 2694 free(arg.buffer); 2695 2696 return (p); 2697 } 2698 2699 int 2700 dlclose(void *handle) 2701 { 2702 
Obj_Entry *root; 2703 RtldLockState lockstate; 2704 2705 wlock_acquire(rtld_bind_lock, &lockstate); 2706 root = dlcheck(handle); 2707 if (root == NULL) { 2708 lock_release(rtld_bind_lock, &lockstate); 2709 return -1; 2710 } 2711 LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount, 2712 root->path); 2713 2714 /* Unreference the object and its dependencies. */ 2715 root->dl_refcount--; 2716 2717 if (root->refcount == 1) { 2718 /* 2719 * The object will be no longer referenced, so we must unload it. 2720 * First, call the fini functions. 2721 */ 2722 objlist_call_fini(&list_fini, root, &lockstate); 2723 2724 unref_dag(root); 2725 2726 /* Finish cleaning up the newly-unreferenced objects. */ 2727 GDB_STATE(RT_DELETE,&root->linkmap); 2728 unload_object(root); 2729 GDB_STATE(RT_CONSISTENT,NULL); 2730 } else 2731 unref_dag(root); 2732 2733 LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL); 2734 lock_release(rtld_bind_lock, &lockstate); 2735 return 0; 2736 } 2737 2738 char * 2739 dlerror(void) 2740 { 2741 char *msg = error_message; 2742 error_message = NULL; 2743 return msg; 2744 } 2745 2746 /* 2747 * This function is deprecated and has no effect. 2748 */ 2749 void 2750 dllockinit(void *context, 2751 void *(*lock_create)(void *context), 2752 void (*rlock_acquire)(void *lock), 2753 void (*wlock_acquire)(void *lock), 2754 void (*lock_release)(void *lock), 2755 void (*lock_destroy)(void *lock), 2756 void (*context_destroy)(void *context)) 2757 { 2758 static void *cur_context; 2759 static void (*cur_context_destroy)(void *); 2760 2761 /* Just destroy the context from the previous call, if necessary. */ 2762 if (cur_context_destroy != NULL) 2763 cur_context_destroy(cur_context); 2764 cur_context = context; 2765 cur_context_destroy = context_destroy; 2766 } 2767 2768 void * 2769 dlopen(const char *name, int mode) 2770 { 2771 2772 return (rtld_dlopen(name, -1, mode)); 2773 } 2774 2775 void * 2776 fdlopen(int fd, int mode) 2777 { 2778 2779 return (rtld_dlopen(NULL, fd, mode)); 2780 } 2781 2782 static void * 2783 rtld_dlopen(const char *name, int fd, int mode) 2784 { 2785 RtldLockState lockstate; 2786 int lo_flags; 2787 2788 LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name); 2789 ld_tracing = (mode & RTLD_TRACE) == 0 ? 
NULL : "1"; 2790 if (ld_tracing != NULL) { 2791 rlock_acquire(rtld_bind_lock, &lockstate); 2792 if (sigsetjmp(lockstate.env, 0) != 0) 2793 lock_upgrade(rtld_bind_lock, &lockstate); 2794 environ = (char **)*get_program_var_addr("environ", &lockstate); 2795 lock_release(rtld_bind_lock, &lockstate); 2796 } 2797 lo_flags = RTLD_LO_DLOPEN; 2798 if (mode & RTLD_NODELETE) 2799 lo_flags |= RTLD_LO_NODELETE; 2800 if (mode & RTLD_NOLOAD) 2801 lo_flags |= RTLD_LO_NOLOAD; 2802 if (ld_tracing != NULL) 2803 lo_flags |= RTLD_LO_TRACE; 2804 2805 return (dlopen_object(name, fd, obj_main, lo_flags, 2806 mode & (RTLD_MODEMASK | RTLD_GLOBAL), NULL)); 2807 } 2808 2809 static void 2810 dlopen_cleanup(Obj_Entry *obj) 2811 { 2812 2813 obj->dl_refcount--; 2814 unref_dag(obj); 2815 if (obj->refcount == 0) 2816 unload_object(obj); 2817 } 2818 2819 static Obj_Entry * 2820 dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags, 2821 int mode, RtldLockState *lockstate) 2822 { 2823 Obj_Entry **old_obj_tail; 2824 Obj_Entry *obj; 2825 Objlist initlist; 2826 RtldLockState mlockstate; 2827 int result; 2828 2829 objlist_init(&initlist); 2830 2831 if (lockstate == NULL && !(lo_flags & RTLD_LO_EARLY)) { 2832 wlock_acquire(rtld_bind_lock, &mlockstate); 2833 lockstate = &mlockstate; 2834 } 2835 GDB_STATE(RT_ADD,NULL); 2836 2837 old_obj_tail = obj_tail; 2838 obj = NULL; 2839 if (name == NULL && fd == -1) { 2840 obj = obj_main; 2841 obj->refcount++; 2842 } else { 2843 obj = load_object(name, fd, refobj, lo_flags); 2844 } 2845 2846 if (obj) { 2847 obj->dl_refcount++; 2848 if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL) 2849 objlist_push_tail(&list_global, obj); 2850 if (*old_obj_tail != NULL) { /* We loaded something new. */ 2851 assert(*old_obj_tail == obj); 2852 result = load_needed_objects(obj, 2853 lo_flags & (RTLD_LO_DLOPEN | RTLD_LO_EARLY)); 2854 init_dag(obj); 2855 ref_dag(obj); 2856 if (result != -1) 2857 result = rtld_verify_versions(&obj->dagmembers); 2858 if (result != -1 && ld_tracing) 2859 goto trace; 2860 if (result == -1 || relocate_object_dag(obj, 2861 (mode & RTLD_MODEMASK) == RTLD_NOW, &obj_rtld, 2862 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0, 2863 lockstate) == -1) { 2864 dlopen_cleanup(obj); 2865 obj = NULL; 2866 } else if (lo_flags & RTLD_LO_EARLY) { 2867 /* 2868 * Do not call the init functions for early loaded 2869 * filtees. The image is still not initialized enough 2870 * for them to work. 2871 * 2872 * Our object is found by the global object list and 2873 * will be ordered among all init calls done right 2874 * before transferring control to main. 2875 */ 2876 } else { 2877 /* Make list of init functions to call. */ 2878 initlist_add_objects(obj, &obj->next, &initlist); 2879 } 2880 /* 2881 * Process all no_delete objects here, given them own 2882 * DAGs to prevent their dependencies from being unloaded. 2883 * This has to be done after we have loaded all of the 2884 * dependencies, so that we do not miss any. 2885 */ 2886 if (obj != NULL) 2887 process_nodelete(obj); 2888 } else { 2889 /* 2890 * Bump the reference counts for objects on this DAG. If 2891 * this is the first dlopen() call for the object that was 2892 * already loaded as a dependency, initialize the dag 2893 * starting at it. 
2894 */ 2895 init_dag(obj); 2896 ref_dag(obj); 2897 2898 if ((lo_flags & RTLD_LO_TRACE) != 0) 2899 goto trace; 2900 } 2901 if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 || 2902 obj->z_nodelete) && !obj->ref_nodel) { 2903 dbg("obj %s nodelete", obj->path); 2904 ref_dag(obj); 2905 obj->z_nodelete = obj->ref_nodel = true; 2906 } 2907 } 2908 2909 LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0, 2910 name); 2911 GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL); 2912 2913 if (!(lo_flags & RTLD_LO_EARLY)) { 2914 map_stacks_exec(lockstate); 2915 } 2916 2917 if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW, 2918 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0, 2919 lockstate) == -1) { 2920 objlist_clear(&initlist); 2921 dlopen_cleanup(obj); 2922 if (lockstate == &mlockstate) 2923 lock_release(rtld_bind_lock, lockstate); 2924 return (NULL); 2925 } 2926 2927 if (!(lo_flags & RTLD_LO_EARLY)) { 2928 /* Call the init functions. */ 2929 objlist_call_init(&initlist, lockstate); 2930 } 2931 objlist_clear(&initlist); 2932 if (lockstate == &mlockstate) 2933 lock_release(rtld_bind_lock, lockstate); 2934 return obj; 2935 trace: 2936 trace_loaded_objects(obj); 2937 if (lockstate == &mlockstate) 2938 lock_release(rtld_bind_lock, lockstate); 2939 exit(0); 2940 } 2941 2942 static void * 2943 do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve, 2944 int flags) 2945 { 2946 DoneList donelist; 2947 const Obj_Entry *obj, *defobj; 2948 const Elf_Sym *def; 2949 SymLook req; 2950 RtldLockState lockstate; 2951 #ifndef __ia64__ 2952 tls_index ti; 2953 #endif 2954 int res; 2955 2956 def = NULL; 2957 defobj = NULL; 2958 symlook_init(&req, name); 2959 req.ventry = ve; 2960 req.flags = flags | SYMLOOK_IN_PLT; 2961 req.lockstate = &lockstate; 2962 2963 rlock_acquire(rtld_bind_lock, &lockstate); 2964 if (sigsetjmp(lockstate.env, 0) != 0) 2965 lock_upgrade(rtld_bind_lock, &lockstate); 2966 if (handle == NULL || handle == RTLD_NEXT || 2967 handle == RTLD_DEFAULT || handle == RTLD_SELF) { 2968 2969 if ((obj = obj_from_addr(retaddr)) == NULL) { 2970 _rtld_error("Cannot determine caller's shared object"); 2971 lock_release(rtld_bind_lock, &lockstate); 2972 return NULL; 2973 } 2974 if (handle == NULL) { /* Just the caller's shared object. */ 2975 res = symlook_obj(&req, obj); 2976 if (res == 0) { 2977 def = req.sym_out; 2978 defobj = req.defobj_out; 2979 } 2980 } else if (handle == RTLD_NEXT || /* Objects after caller's */ 2981 handle == RTLD_SELF) { /* ... caller included */ 2982 if (handle == RTLD_NEXT) 2983 obj = obj->next; 2984 for (; obj != NULL; obj = obj->next) { 2985 res = symlook_obj(&req, obj); 2986 if (res == 0) { 2987 if (def == NULL || 2988 ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) { 2989 def = req.sym_out; 2990 defobj = req.defobj_out; 2991 if (ELF_ST_BIND(def->st_info) != STB_WEAK) 2992 break; 2993 } 2994 } 2995 } 2996 /* 2997 * Search the dynamic linker itself, and possibly resolve the 2998 * symbol from there. This is how the application links to 2999 * dynamic linker services such as dlopen. 
3000 */ 3001 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { 3002 res = symlook_obj(&req, &obj_rtld); 3003 if (res == 0) { 3004 def = req.sym_out; 3005 defobj = req.defobj_out; 3006 } 3007 } 3008 } else { 3009 assert(handle == RTLD_DEFAULT); 3010 res = symlook_default(&req, obj); 3011 if (res == 0) { 3012 defobj = req.defobj_out; 3013 def = req.sym_out; 3014 } 3015 } 3016 } else { 3017 if ((obj = dlcheck(handle)) == NULL) { 3018 lock_release(rtld_bind_lock, &lockstate); 3019 return NULL; 3020 } 3021 3022 donelist_init(&donelist); 3023 if (obj->mainprog) { 3024 /* Handle obtained by dlopen(NULL, ...) implies global scope. */ 3025 res = symlook_global(&req, &donelist); 3026 if (res == 0) { 3027 def = req.sym_out; 3028 defobj = req.defobj_out; 3029 } 3030 /* 3031 * Search the dynamic linker itself, and possibly resolve the 3032 * symbol from there. This is how the application links to 3033 * dynamic linker services such as dlopen. 3034 */ 3035 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { 3036 res = symlook_obj(&req, &obj_rtld); 3037 if (res == 0) { 3038 def = req.sym_out; 3039 defobj = req.defobj_out; 3040 } 3041 } 3042 } 3043 else { 3044 /* Search the whole DAG rooted at the given object. */ 3045 res = symlook_list(&req, &obj->dagmembers, &donelist); 3046 if (res == 0) { 3047 def = req.sym_out; 3048 defobj = req.defobj_out; 3049 } 3050 } 3051 } 3052 3053 if (def != NULL) { 3054 lock_release(rtld_bind_lock, &lockstate); 3055 3056 /* 3057 * The value required by the caller is derived from the value 3058 * of the symbol. For the ia64 architecture, we need to 3059 * construct a function descriptor which the caller can use to 3060 * call the function with the right 'gp' value. For other 3061 * architectures and for non-functions, the value is simply 3062 * the relocated value of the symbol. 
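 *
 * A caller only ever sees that final value; e.g. (application code,
 * not rtld, with the library and symbol purely as examples):
 *
 *	void *h = dlopen("libm.so", RTLD_NOW);
 *	double (*cosine)(double) = (double (*)(double))dlsym(h, "cos");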
3063 */ 3064 if (ELF_ST_TYPE(def->st_info) == STT_FUNC) 3065 return (make_function_pointer(def, defobj)); 3066 else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) 3067 return (rtld_resolve_ifunc(defobj, def)); 3068 else if (ELF_ST_TYPE(def->st_info) == STT_TLS) { 3069 #ifdef __ia64__ 3070 return (__tls_get_addr(defobj->tlsindex, def->st_value)); 3071 #else 3072 ti.ti_module = defobj->tlsindex; 3073 ti.ti_offset = def->st_value; 3074 return (__tls_get_addr(&ti)); 3075 #endif 3076 } else 3077 return (defobj->relocbase + def->st_value); 3078 } 3079 3080 _rtld_error("Undefined symbol \"%s\"", name); 3081 lock_release(rtld_bind_lock, &lockstate); 3082 return NULL; 3083 } 3084 3085 void * 3086 dlsym(void *handle, const char *name) 3087 { 3088 return do_dlsym(handle, name, __builtin_return_address(0), NULL, 3089 SYMLOOK_DLSYM); 3090 } 3091 3092 dlfunc_t 3093 dlfunc(void *handle, const char *name) 3094 { 3095 union { 3096 void *d; 3097 dlfunc_t f; 3098 } rv; 3099 3100 rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL, 3101 SYMLOOK_DLSYM); 3102 return (rv.f); 3103 } 3104 3105 void * 3106 dlvsym(void *handle, const char *name, const char *version) 3107 { 3108 Ver_Entry ventry; 3109 3110 ventry.name = version; 3111 ventry.file = NULL; 3112 ventry.hash = elf_hash(version); 3113 ventry.flags= 0; 3114 return do_dlsym(handle, name, __builtin_return_address(0), &ventry, 3115 SYMLOOK_DLSYM); 3116 } 3117 3118 int 3119 _rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info) 3120 { 3121 const Obj_Entry *obj; 3122 RtldLockState lockstate; 3123 3124 rlock_acquire(rtld_bind_lock, &lockstate); 3125 obj = obj_from_addr(addr); 3126 if (obj == NULL) { 3127 _rtld_error("No shared object contains address"); 3128 lock_release(rtld_bind_lock, &lockstate); 3129 return (0); 3130 } 3131 rtld_fill_dl_phdr_info(obj, phdr_info); 3132 lock_release(rtld_bind_lock, &lockstate); 3133 return (1); 3134 } 3135 3136 int 3137 dladdr(const void *addr, Dl_info *info) 3138 { 3139 const Obj_Entry *obj; 3140 const Elf_Sym *def; 3141 void *symbol_addr; 3142 unsigned long symoffset; 3143 RtldLockState lockstate; 3144 3145 rlock_acquire(rtld_bind_lock, &lockstate); 3146 obj = obj_from_addr(addr); 3147 if (obj == NULL) { 3148 _rtld_error("No shared object contains address"); 3149 lock_release(rtld_bind_lock, &lockstate); 3150 return 0; 3151 } 3152 info->dli_fname = obj->path; 3153 info->dli_fbase = obj->mapbase; 3154 info->dli_saddr = (void *)0; 3155 info->dli_sname = NULL; 3156 3157 /* 3158 * Walk the symbol list looking for the symbol whose address is 3159 * closest to the address sent in. 3160 */ 3161 for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) { 3162 def = obj->symtab + symoffset; 3163 3164 /* 3165 * For skip the symbol if st_shndx is either SHN_UNDEF or 3166 * SHN_COMMON. 3167 */ 3168 if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON) 3169 continue; 3170 3171 /* 3172 * If the symbol is greater than the specified address, or if it 3173 * is further away from addr than the current nearest symbol, 3174 * then reject it. 3175 */ 3176 symbol_addr = obj->relocbase + def->st_value; 3177 if (symbol_addr > addr || symbol_addr < info->dli_saddr) 3178 continue; 3179 3180 /* Update our idea of the nearest symbol. */ 3181 info->dli_sname = obj->strtab + def->st_name; 3182 info->dli_saddr = symbol_addr; 3183 3184 /* Exact match? 
*/ 3185 if (info->dli_saddr == addr) 3186 break; 3187 } 3188 lock_release(rtld_bind_lock, &lockstate); 3189 return 1; 3190 } 3191 3192 int 3193 dlinfo(void *handle, int request, void *p) 3194 { 3195 const Obj_Entry *obj; 3196 RtldLockState lockstate; 3197 int error; 3198 3199 rlock_acquire(rtld_bind_lock, &lockstate); 3200 3201 if (handle == NULL || handle == RTLD_SELF) { 3202 void *retaddr; 3203 3204 retaddr = __builtin_return_address(0); /* __GNUC__ only */ 3205 if ((obj = obj_from_addr(retaddr)) == NULL) 3206 _rtld_error("Cannot determine caller's shared object"); 3207 } else 3208 obj = dlcheck(handle); 3209 3210 if (obj == NULL) { 3211 lock_release(rtld_bind_lock, &lockstate); 3212 return (-1); 3213 } 3214 3215 error = 0; 3216 switch (request) { 3217 case RTLD_DI_LINKMAP: 3218 *((struct link_map const **)p) = &obj->linkmap; 3219 break; 3220 case RTLD_DI_ORIGIN: 3221 error = rtld_dirname(obj->path, p); 3222 break; 3223 3224 case RTLD_DI_SERINFOSIZE: 3225 case RTLD_DI_SERINFO: 3226 error = do_search_info(obj, request, (struct dl_serinfo *)p); 3227 break; 3228 3229 default: 3230 _rtld_error("Invalid request %d passed to dlinfo()", request); 3231 error = -1; 3232 } 3233 3234 lock_release(rtld_bind_lock, &lockstate); 3235 3236 return (error); 3237 } 3238 3239 static void 3240 rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info) 3241 { 3242 3243 phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase; 3244 phdr_info->dlpi_name = STAILQ_FIRST(&obj->names) ? 3245 STAILQ_FIRST(&obj->names)->name : obj->path; 3246 phdr_info->dlpi_phdr = obj->phdr; 3247 phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]); 3248 phdr_info->dlpi_tls_modid = obj->tlsindex; 3249 phdr_info->dlpi_tls_data = obj->tlsinit; 3250 phdr_info->dlpi_adds = obj_loads; 3251 phdr_info->dlpi_subs = obj_loads - obj_count; 3252 } 3253 3254 int 3255 dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param) 3256 { 3257 struct dl_phdr_info phdr_info; 3258 const Obj_Entry *obj; 3259 RtldLockState bind_lockstate, phdr_lockstate; 3260 int error; 3261 3262 wlock_acquire(rtld_phdr_lock, &phdr_lockstate); 3263 rlock_acquire(rtld_bind_lock, &bind_lockstate); 3264 3265 error = 0; 3266 3267 for (obj = obj_list; obj != NULL; obj = obj->next) { 3268 rtld_fill_dl_phdr_info(obj, &phdr_info); 3269 if ((error = callback(&phdr_info, sizeof phdr_info, param)) != 0) 3270 break; 3271 3272 } 3273 if (error == 0) { 3274 rtld_fill_dl_phdr_info(&obj_rtld, &phdr_info); 3275 error = callback(&phdr_info, sizeof(phdr_info), param); 3276 } 3277 3278 lock_release(rtld_bind_lock, &bind_lockstate); 3279 lock_release(rtld_phdr_lock, &phdr_lockstate); 3280 3281 return (error); 3282 } 3283 3284 static void * 3285 fill_search_info(const char *dir, size_t dirlen, void *param) 3286 { 3287 struct fill_search_info_args *arg; 3288 3289 arg = param; 3290 3291 if (arg->request == RTLD_DI_SERINFOSIZE) { 3292 arg->serinfo->dls_cnt ++; 3293 arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1; 3294 } else { 3295 struct dl_serpath *s_entry; 3296 3297 s_entry = arg->serpath; 3298 s_entry->dls_name = arg->strspace; 3299 s_entry->dls_flags = arg->flags; 3300 3301 strncpy(arg->strspace, dir, dirlen); 3302 arg->strspace[dirlen] = '\0'; 3303 3304 arg->strspace += dirlen + 1; 3305 arg->serpath++; 3306 } 3307 3308 return (NULL); 3309 } 3310 3311 static int 3312 do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info) 3313 { 3314 struct dl_serinfo _info; 3315 struct fill_search_info_args args; 3316 3317 args.request = 
RTLD_DI_SERINFOSIZE; 3318 args.serinfo = &_info; 3319 3320 _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath); 3321 _info.dls_cnt = 0; 3322 3323 path_enumerate(obj->rpath, fill_search_info, &args); 3324 path_enumerate(ld_library_path, fill_search_info, &args); 3325 path_enumerate(obj->runpath, fill_search_info, &args); 3326 path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args); 3327 if (!obj->z_nodeflib) 3328 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args); 3329 3330 3331 if (request == RTLD_DI_SERINFOSIZE) { 3332 info->dls_size = _info.dls_size; 3333 info->dls_cnt = _info.dls_cnt; 3334 return (0); 3335 } 3336 3337 if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) { 3338 _rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()"); 3339 return (-1); 3340 } 3341 3342 args.request = RTLD_DI_SERINFO; 3343 args.serinfo = info; 3344 args.serpath = &info->dls_serpath[0]; 3345 args.strspace = (char *)&info->dls_serpath[_info.dls_cnt]; 3346 3347 args.flags = LA_SER_RUNPATH; 3348 if (path_enumerate(obj->rpath, fill_search_info, &args) != NULL) 3349 return (-1); 3350 3351 args.flags = LA_SER_LIBPATH; 3352 if (path_enumerate(ld_library_path, fill_search_info, &args) != NULL) 3353 return (-1); 3354 3355 args.flags = LA_SER_RUNPATH; 3356 if (path_enumerate(obj->runpath, fill_search_info, &args) != NULL) 3357 return (-1); 3358 3359 args.flags = LA_SER_CONFIG; 3360 if (path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args) 3361 != NULL) 3362 return (-1); 3363 3364 args.flags = LA_SER_DEFAULT; 3365 if (!obj->z_nodeflib && 3366 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args) != NULL) 3367 return (-1); 3368 return (0); 3369 } 3370 3371 static int 3372 rtld_dirname(const char *path, char *bname) 3373 { 3374 const char *endp; 3375 3376 /* Empty or NULL string gets treated as "." */ 3377 if (path == NULL || *path == '\0') { 3378 bname[0] = '.'; 3379 bname[1] = '\0'; 3380 return (0); 3381 } 3382 3383 /* Strip trailing slashes */ 3384 endp = path + strlen(path) - 1; 3385 while (endp > path && *endp == '/') 3386 endp--; 3387 3388 /* Find the start of the dir */ 3389 while (endp > path && *endp != '/') 3390 endp--; 3391 3392 /* Either the dir is "/" or there are no slashes */ 3393 if (endp == path) { 3394 bname[0] = *endp == '/' ? 
'/' : '.'; 3395 bname[1] = '\0'; 3396 return (0); 3397 } else { 3398 do { 3399 endp--; 3400 } while (endp > path && *endp == '/'); 3401 } 3402 3403 if (endp - path + 2 > PATH_MAX) 3404 { 3405 _rtld_error("Filename is too long: %s", path); 3406 return(-1); 3407 } 3408 3409 strncpy(bname, path, endp - path + 1); 3410 bname[endp - path + 1] = '\0'; 3411 return (0); 3412 } 3413 3414 static int 3415 rtld_dirname_abs(const char *path, char *base) 3416 { 3417 char base_rel[PATH_MAX]; 3418 3419 if (rtld_dirname(path, base) == -1) 3420 return (-1); 3421 if (base[0] == '/') 3422 return (0); 3423 if (getcwd(base_rel, sizeof(base_rel)) == NULL || 3424 strlcat(base_rel, "/", sizeof(base_rel)) >= sizeof(base_rel) || 3425 strlcat(base_rel, base, sizeof(base_rel)) >= sizeof(base_rel)) 3426 return (-1); 3427 strcpy(base, base_rel); 3428 return (0); 3429 } 3430 3431 static void 3432 linkmap_add(Obj_Entry *obj) 3433 { 3434 struct link_map *l = &obj->linkmap; 3435 struct link_map *prev; 3436 3437 obj->linkmap.l_name = obj->path; 3438 obj->linkmap.l_addr = obj->mapbase; 3439 obj->linkmap.l_ld = obj->dynamic; 3440 #ifdef __mips__ 3441 /* GDB needs load offset on MIPS to use the symbols */ 3442 obj->linkmap.l_offs = obj->relocbase; 3443 #endif 3444 3445 if (r_debug.r_map == NULL) { 3446 r_debug.r_map = l; 3447 return; 3448 } 3449 3450 /* 3451 * Scan to the end of the list, but not past the entry for the 3452 * dynamic linker, which we want to keep at the very end. 3453 */ 3454 for (prev = r_debug.r_map; 3455 prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap; 3456 prev = prev->l_next) 3457 ; 3458 3459 /* Link in the new entry. */ 3460 l->l_prev = prev; 3461 l->l_next = prev->l_next; 3462 if (l->l_next != NULL) 3463 l->l_next->l_prev = l; 3464 prev->l_next = l; 3465 } 3466 3467 static void 3468 linkmap_delete(Obj_Entry *obj) 3469 { 3470 struct link_map *l = &obj->linkmap; 3471 3472 if (l->l_prev == NULL) { 3473 if ((r_debug.r_map = l->l_next) != NULL) 3474 l->l_next->l_prev = NULL; 3475 return; 3476 } 3477 3478 if ((l->l_prev->l_next = l->l_next) != NULL) 3479 l->l_next->l_prev = l->l_prev; 3480 } 3481 3482 /* 3483 * Function for the debugger to set a breakpoint on to gain control. 3484 * 3485 * The two parameters allow the debugger to easily find and determine 3486 * what the runtime loader is doing and to whom it is doing it. 3487 * 3488 * When the loadhook trap is hit (r_debug_state, set at program 3489 * initialization), the arguments can be found on the stack: 3490 * 3491 * +8 struct link_map *m 3492 * +4 struct r_debug *rd 3493 * +0 RetAddr 3494 */ 3495 void 3496 r_debug_state(struct r_debug* rd, struct link_map *m) 3497 { 3498 /* 3499 * The following is a hack to force the compiler to emit calls to 3500 * this function, even when optimizing. If the function is empty, 3501 * the compiler is not obliged to emit any code for calls to it, 3502 * even when marked __noinline. However, gdb depends on those 3503 * calls being made. 3504 */ 3505 __asm __volatile("" : : : "memory"); 3506 } 3507 3508 /* 3509 * Get address of the pointer variable in the main program. 3510 * Prefer non-weak symbol over the weak one. 
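 * This is how rtld locates variables such as "environ" in the main
 * program so that it can read or update them (see set_program_var()
 * below and the "environ" lookup in rtld_dlopen() above).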
3511 */ 3512 static const void ** 3513 get_program_var_addr(const char *name, RtldLockState *lockstate) 3514 { 3515 SymLook req; 3516 DoneList donelist; 3517 3518 symlook_init(&req, name); 3519 req.lockstate = lockstate; 3520 donelist_init(&donelist); 3521 if (symlook_global(&req, &donelist) != 0) 3522 return (NULL); 3523 if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC) 3524 return ((const void **)make_function_pointer(req.sym_out, 3525 req.defobj_out)); 3526 else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC) 3527 return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out)); 3528 else 3529 return ((const void **)(req.defobj_out->relocbase + 3530 req.sym_out->st_value)); 3531 } 3532 3533 /* 3534 * Set a pointer variable in the main program to the given value. This 3535 * is used to set key variables such as "environ" before any of the 3536 * init functions are called. 3537 */ 3538 static void 3539 set_program_var(const char *name, const void *value) 3540 { 3541 const void **addr; 3542 3543 if ((addr = get_program_var_addr(name, NULL)) != NULL) { 3544 dbg("\"%s\": *%p <-- %p", name, addr, value); 3545 *addr = value; 3546 } 3547 } 3548 3549 /* 3550 * Search the global objects, including dependencies and main object, 3551 * for the given symbol. 3552 */ 3553 static int 3554 symlook_global(SymLook *req, DoneList *donelist) 3555 { 3556 SymLook req1; 3557 const Objlist_Entry *elm; 3558 int res; 3559 3560 symlook_init_from_req(&req1, req); 3561 3562 /* Search all objects loaded at program start up. */ 3563 if (req->defobj_out == NULL || 3564 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) { 3565 res = symlook_list(&req1, &list_main, donelist); 3566 if (res == 0 && (req->defobj_out == NULL || 3567 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) { 3568 req->sym_out = req1.sym_out; 3569 req->defobj_out = req1.defobj_out; 3570 assert(req->defobj_out != NULL); 3571 } 3572 } 3573 3574 /* Search all DAGs whose roots are RTLD_GLOBAL objects. */ 3575 STAILQ_FOREACH(elm, &list_global, link) { 3576 if (req->defobj_out != NULL && 3577 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK) 3578 break; 3579 res = symlook_list(&req1, &elm->obj->dagmembers, donelist); 3580 if (res == 0 && (req->defobj_out == NULL || 3581 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) { 3582 req->sym_out = req1.sym_out; 3583 req->defobj_out = req1.defobj_out; 3584 assert(req->defobj_out != NULL); 3585 } 3586 } 3587 3588 return (req->sym_out != NULL ? 0 : ESRCH); 3589 } 3590 3591 /* 3592 * Given a symbol name in a referencing object, find the corresponding 3593 * definition of the symbol. Returns a pointer to the symbol, or NULL if 3594 * no definition was found. Returns a pointer to the Obj_Entry of the 3595 * defining object via the reference parameter DEFOBJ_OUT. 3596 */ 3597 static int 3598 symlook_default(SymLook *req, const Obj_Entry *refobj) 3599 { 3600 DoneList donelist; 3601 const Objlist_Entry *elm; 3602 SymLook req1; 3603 int res; 3604 3605 donelist_init(&donelist); 3606 symlook_init_from_req(&req1, req); 3607 3608 /* Look first in the referencing object if linked symbolically. */ 3609 if (refobj->symbolic && !donelist_check(&donelist, refobj)) { 3610 res = symlook_obj(&req1, refobj); 3611 if (res == 0) { 3612 req->sym_out = req1.sym_out; 3613 req->defobj_out = req1.defobj_out; 3614 assert(req->defobj_out != NULL); 3615 } 3616 } 3617 3618 symlook_global(req, &donelist); 3619 3620 /* Search all dlopened DAGs containing the referencing object. 
*/
3621 STAILQ_FOREACH(elm, &refobj->dldags, link) {
3622 if (req->sym_out != NULL &&
3623 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
3624 break;
3625 res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
3626 if (res == 0 && (req->sym_out == NULL ||
3627 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3628 req->sym_out = req1.sym_out;
3629 req->defobj_out = req1.defobj_out;
3630 assert(req->defobj_out != NULL);
3631 }
3632 }
3633
3634 /*
3635 * Search the dynamic linker itself, and possibly resolve the
3636 * symbol from there. This is how the application links to
3637 * dynamic linker services such as dlopen.
3638 */
3639 if (req->sym_out == NULL ||
3640 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
3641 res = symlook_obj(&req1, &obj_rtld);
3642 if (res == 0) {
3643 req->sym_out = req1.sym_out;
3644 req->defobj_out = req1.defobj_out;
3645 assert(req->defobj_out != NULL);
3646 }
3647 }
3648
3649 return (req->sym_out != NULL ? 0 : ESRCH);
3650 }
3651
3652 static int
3653 symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
3654 {
3655 const Elf_Sym *def;
3656 const Obj_Entry *defobj;
3657 const Objlist_Entry *elm;
3658 SymLook req1;
3659 int res;
3660
3661 def = NULL;
3662 defobj = NULL;
3663 STAILQ_FOREACH(elm, objlist, link) {
3664 if (donelist_check(dlp, elm->obj))
3665 continue;
3666 symlook_init_from_req(&req1, req);
3667 if ((res = symlook_obj(&req1, elm->obj)) == 0) {
3668 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
3669 def = req1.sym_out;
3670 defobj = req1.defobj_out;
3671 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
3672 break;
3673 }
3674 }
3675 }
3676 if (def != NULL) {
3677 req->sym_out = def;
3678 req->defobj_out = defobj;
3679 return (0);
3680 }
3681 return (ESRCH);
3682 }
3683
3684 /*
3685 * Search the chain of DAGs pointed to by the given Needed_Entry
3686 * for a symbol of the given name. Each DAG is scanned completely
3687 * before advancing to the next one. Returns a pointer to the symbol,
3688 * or NULL if no definition was found.
3689 */
3690 static int
3691 symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
3692 {
3693 const Elf_Sym *def;
3694 const Needed_Entry *n;
3695 const Obj_Entry *defobj;
3696 SymLook req1;
3697 int res;
3698
3699 def = NULL;
3700 defobj = NULL;
3701 symlook_init_from_req(&req1, req);
3702 for (n = needed; n != NULL; n = n->next) {
3703 if (n->obj == NULL ||
3704 (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
3705 continue;
3706 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
3707 def = req1.sym_out;
3708 defobj = req1.defobj_out;
3709 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
3710 break;
3711 }
3712 }
3713 if (def != NULL) {
3714 req->sym_out = def;
3715 req->defobj_out = defobj;
3716 return (0);
3717 }
3718 return (ESRCH);
3719 }
3720
3721 /*
3722 * Search the symbol table of a single shared object for a symbol of
3723 * the given name and version, if requested. Returns a pointer to the
3724 * symbol, or NULL if no definition was found. If the object is a
3725 * filter, return the filtered symbol from the filtee.
3726 *
3727 * The symbol's hash value is passed in for efficiency reasons; that
3728 * eliminates many recomputations of the hash value.
3729 */
3730 int
3731 symlook_obj(SymLook *req, const Obj_Entry *obj)
3732 {
3733 DoneList donelist;
3734 SymLook req1;
3735 int flags, res, mres;
3736
3737 /*
3738 * If there is at least one valid hash at this point, we prefer to
3739 * use the faster GNU version if available.
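 * Both hash tables index the same dynamic symbol table, so either
 * yields the same definition; the GNU hash is simply faster, largely
 * because its Bloom filter rejects most failed lookups early.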
3740 */ 3741 if (obj->valid_hash_gnu) 3742 mres = symlook_obj1_gnu(req, obj); 3743 else if (obj->valid_hash_sysv) 3744 mres = symlook_obj1_sysv(req, obj); 3745 else 3746 return (EINVAL); 3747 3748 if (mres == 0) { 3749 if (obj->needed_filtees != NULL) { 3750 flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0; 3751 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate); 3752 donelist_init(&donelist); 3753 symlook_init_from_req(&req1, req); 3754 res = symlook_needed(&req1, obj->needed_filtees, &donelist); 3755 if (res == 0) { 3756 req->sym_out = req1.sym_out; 3757 req->defobj_out = req1.defobj_out; 3758 } 3759 return (res); 3760 } 3761 if (obj->needed_aux_filtees != NULL) { 3762 flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0; 3763 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate); 3764 donelist_init(&donelist); 3765 symlook_init_from_req(&req1, req); 3766 res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist); 3767 if (res == 0) { 3768 req->sym_out = req1.sym_out; 3769 req->defobj_out = req1.defobj_out; 3770 return (res); 3771 } 3772 } 3773 } 3774 return (mres); 3775 } 3776 3777 /* Symbol match routine common to both hash functions */ 3778 static bool 3779 matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result, 3780 const unsigned long symnum) 3781 { 3782 Elf_Versym verndx; 3783 const Elf_Sym *symp; 3784 const char *strp; 3785 3786 symp = obj->symtab + symnum; 3787 strp = obj->strtab + symp->st_name; 3788 3789 switch (ELF_ST_TYPE(symp->st_info)) { 3790 case STT_FUNC: 3791 case STT_NOTYPE: 3792 case STT_OBJECT: 3793 case STT_COMMON: 3794 case STT_GNU_IFUNC: 3795 if (symp->st_value == 0) 3796 return (false); 3797 /* fallthrough */ 3798 case STT_TLS: 3799 if (symp->st_shndx != SHN_UNDEF) 3800 break; 3801 #ifndef __mips__ 3802 else if (((req->flags & SYMLOOK_IN_PLT) == 0) && 3803 (ELF_ST_TYPE(symp->st_info) == STT_FUNC)) 3804 break; 3805 /* fallthrough */ 3806 #endif 3807 default: 3808 return (false); 3809 } 3810 if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0) 3811 return (false); 3812 3813 if (req->ventry == NULL) { 3814 if (obj->versyms != NULL) { 3815 verndx = VER_NDX(obj->versyms[symnum]); 3816 if (verndx > obj->vernum) { 3817 _rtld_error( 3818 "%s: symbol %s references wrong version %d", 3819 obj->path, obj->strtab + symnum, verndx); 3820 return (false); 3821 } 3822 /* 3823 * If we are not called from dlsym (i.e. this 3824 * is a normal relocation from unversioned 3825 * binary), accept the symbol immediately if 3826 * it happens to have first version after this 3827 * shared object became versioned. Otherwise, 3828 * if symbol is versioned and not hidden, 3829 * remember it. If it is the only symbol with 3830 * this name exported by the shared object, it 3831 * will be returned as a match by the calling 3832 * function. If symbol is global (verndx < 2) 3833 * accept it unconditionally. 
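 * ("Global" above means verndx is below VER_NDX_GIVEN, i.e. one of
 * the reserved indices 0 and 1.)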
3834 */ 3835 if ((req->flags & SYMLOOK_DLSYM) == 0 && 3836 verndx == VER_NDX_GIVEN) { 3837 result->sym_out = symp; 3838 return (true); 3839 } 3840 else if (verndx >= VER_NDX_GIVEN) { 3841 if ((obj->versyms[symnum] & VER_NDX_HIDDEN) 3842 == 0) { 3843 if (result->vsymp == NULL) 3844 result->vsymp = symp; 3845 result->vcount++; 3846 } 3847 return (false); 3848 } 3849 } 3850 result->sym_out = symp; 3851 return (true); 3852 } 3853 if (obj->versyms == NULL) { 3854 if (object_match_name(obj, req->ventry->name)) { 3855 _rtld_error("%s: object %s should provide version %s " 3856 "for symbol %s", obj_rtld.path, obj->path, 3857 req->ventry->name, obj->strtab + symnum); 3858 return (false); 3859 } 3860 } else { 3861 verndx = VER_NDX(obj->versyms[symnum]); 3862 if (verndx > obj->vernum) { 3863 _rtld_error("%s: symbol %s references wrong version %d", 3864 obj->path, obj->strtab + symnum, verndx); 3865 return (false); 3866 } 3867 if (obj->vertab[verndx].hash != req->ventry->hash || 3868 strcmp(obj->vertab[verndx].name, req->ventry->name)) { 3869 /* 3870 * Version does not match. Look if this is a 3871 * global symbol and if it is not hidden. If 3872 * global symbol (verndx < 2) is available, 3873 * use it. Do not return symbol if we are 3874 * called by dlvsym, because dlvsym looks for 3875 * a specific version and default one is not 3876 * what dlvsym wants. 3877 */ 3878 if ((req->flags & SYMLOOK_DLSYM) || 3879 (verndx >= VER_NDX_GIVEN) || 3880 (obj->versyms[symnum] & VER_NDX_HIDDEN)) 3881 return (false); 3882 } 3883 } 3884 result->sym_out = symp; 3885 return (true); 3886 } 3887 3888 /* 3889 * Search for symbol using SysV hash function. 3890 * obj->buckets is known not to be NULL at this point; the test for this was 3891 * performed with the obj->valid_hash_sysv assignment. 
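 *
 * For reference, req->hash is the classic SysV ELF hash of the symbol
 * name, computed elsewhere roughly as:
 *
 *	h = 0;
 *	while (*name != '\0') {
 *		h = (h << 4) + (unsigned char)*name++;
 *		if ((g = h & 0xf0000000) != 0)
 *			h ^= g >> 24;
 *		h &= ~g;
 *	}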
3892 */ 3893 static int 3894 symlook_obj1_sysv(SymLook *req, const Obj_Entry *obj) 3895 { 3896 unsigned long symnum; 3897 Sym_Match_Result matchres; 3898 3899 matchres.sym_out = NULL; 3900 matchres.vsymp = NULL; 3901 matchres.vcount = 0; 3902 3903 for (symnum = obj->buckets[req->hash % obj->nbuckets]; 3904 symnum != STN_UNDEF; symnum = obj->chains[symnum]) { 3905 if (symnum >= obj->nchains) 3906 return (ESRCH); /* Bad object */ 3907 3908 if (matched_symbol(req, obj, &matchres, symnum)) { 3909 req->sym_out = matchres.sym_out; 3910 req->defobj_out = obj; 3911 return (0); 3912 } 3913 } 3914 if (matchres.vcount == 1) { 3915 req->sym_out = matchres.vsymp; 3916 req->defobj_out = obj; 3917 return (0); 3918 } 3919 return (ESRCH); 3920 } 3921 3922 /* Search for symbol using GNU hash function */ 3923 static int 3924 symlook_obj1_gnu(SymLook *req, const Obj_Entry *obj) 3925 { 3926 Elf_Addr bloom_word; 3927 const Elf32_Word *hashval; 3928 Elf32_Word bucket; 3929 Sym_Match_Result matchres; 3930 unsigned int h1, h2; 3931 unsigned long symnum; 3932 3933 matchres.sym_out = NULL; 3934 matchres.vsymp = NULL; 3935 matchres.vcount = 0; 3936 3937 /* Pick right bitmask word from Bloom filter array */ 3938 bloom_word = obj->bloom_gnu[(req->hash_gnu / __ELF_WORD_SIZE) & 3939 obj->maskwords_bm_gnu]; 3940 3941 /* Calculate modulus word size of gnu hash and its derivative */ 3942 h1 = req->hash_gnu & (__ELF_WORD_SIZE - 1); 3943 h2 = ((req->hash_gnu >> obj->shift2_gnu) & (__ELF_WORD_SIZE - 1)); 3944 3945 /* Filter out the "definitely not in set" queries */ 3946 if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0) 3947 return (ESRCH); 3948 3949 /* Locate hash chain and corresponding value element*/ 3950 bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu]; 3951 if (bucket == 0) 3952 return (ESRCH); 3953 hashval = &obj->chain_zero_gnu[bucket]; 3954 do { 3955 if (((*hashval ^ req->hash_gnu) >> 1) == 0) { 3956 symnum = hashval - obj->chain_zero_gnu; 3957 if (matched_symbol(req, obj, &matchres, symnum)) { 3958 req->sym_out = matchres.sym_out; 3959 req->defobj_out = obj; 3960 return (0); 3961 } 3962 } 3963 } while ((*hashval++ & 1) == 0); 3964 if (matchres.vcount == 1) { 3965 req->sym_out = matchres.vsymp; 3966 req->defobj_out = obj; 3967 return (0); 3968 } 3969 return (ESRCH); 3970 } 3971 3972 static void 3973 trace_loaded_objects(Obj_Entry *obj) 3974 { 3975 char *fmt1, *fmt2, *fmt, *main_local, *list_containers; 3976 int c; 3977 3978 if ((main_local = getenv(LD_ "TRACE_LOADED_OBJECTS_PROGNAME")) == NULL) 3979 main_local = ""; 3980 3981 if ((fmt1 = getenv(LD_ "TRACE_LOADED_OBJECTS_FMT1")) == NULL) 3982 fmt1 = "\t%o => %p (%x)\n"; 3983 3984 if ((fmt2 = getenv(LD_ "TRACE_LOADED_OBJECTS_FMT2")) == NULL) 3985 fmt2 = "\t%o (%x)\n"; 3986 3987 list_containers = getenv(LD_ "TRACE_LOADED_OBJECTS_ALL"); 3988 3989 for (; obj; obj = obj->next) { 3990 Needed_Entry *needed; 3991 char *name, *path; 3992 bool is_lib; 3993 3994 if (list_containers && obj->needed != NULL) 3995 rtld_printf("%s:\n", obj->path); 3996 for (needed = obj->needed; needed; needed = needed->next) { 3997 if (needed->obj != NULL) { 3998 if (needed->obj->traced && !list_containers) 3999 continue; 4000 needed->obj->traced = true; 4001 path = needed->obj->path; 4002 } else 4003 path = "not found"; 4004 4005 name = (char *)obj->strtab + needed->name; 4006 is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */ 4007 4008 fmt = is_lib ? 
fmt1 : fmt2; 4009 while ((c = *fmt++) != '\0') { 4010 switch (c) { 4011 default: 4012 rtld_putchar(c); 4013 continue; 4014 case '\\': 4015 switch (c = *fmt) { 4016 case '\0': 4017 continue; 4018 case 'n': 4019 rtld_putchar('\n'); 4020 break; 4021 case 't': 4022 rtld_putchar('\t'); 4023 break; 4024 } 4025 break; 4026 case '%': 4027 switch (c = *fmt) { 4028 case '\0': 4029 continue; 4030 case '%': 4031 default: 4032 rtld_putchar(c); 4033 break; 4034 case 'A': 4035 rtld_putstr(main_local); 4036 break; 4037 case 'a': 4038 rtld_putstr(obj_main->path); 4039 break; 4040 case 'o': 4041 rtld_putstr(name); 4042 break; 4043 #if 0 4044 case 'm': 4045 rtld_printf("%d", sodp->sod_major); 4046 break; 4047 case 'n': 4048 rtld_printf("%d", sodp->sod_minor); 4049 break; 4050 #endif 4051 case 'p': 4052 rtld_putstr(path); 4053 break; 4054 case 'x': 4055 rtld_printf("%p", needed->obj ? needed->obj->mapbase : 4056 0); 4057 break; 4058 } 4059 break; 4060 } 4061 ++fmt; 4062 } 4063 } 4064 } 4065 } 4066 4067 /* 4068 * Unload a dlopened object and its dependencies from memory and from 4069 * our data structures. It is assumed that the DAG rooted in the 4070 * object has already been unreferenced, and that the object has a 4071 * reference count of 0. 4072 */ 4073 static void 4074 unload_object(Obj_Entry *root) 4075 { 4076 Obj_Entry *obj; 4077 Obj_Entry **linkp; 4078 4079 assert(root->refcount == 0); 4080 4081 /* 4082 * Pass over the DAG removing unreferenced objects from 4083 * appropriate lists. 4084 */ 4085 unlink_object(root); 4086 4087 /* Unmap all objects that are no longer referenced. */ 4088 linkp = &obj_list->next; 4089 while ((obj = *linkp) != NULL) { 4090 if (obj->refcount == 0) { 4091 LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0, 4092 obj->path); 4093 dbg("unloading \"%s\"", obj->path); 4094 unload_filtees(root); 4095 munmap(obj->mapbase, obj->mapsize); 4096 linkmap_delete(obj); 4097 *linkp = obj->next; 4098 obj_count--; 4099 obj_free(obj); 4100 } else 4101 linkp = &obj->next; 4102 } 4103 obj_tail = linkp; 4104 } 4105 4106 static void 4107 unlink_object(Obj_Entry *root) 4108 { 4109 Objlist_Entry *elm; 4110 4111 if (root->refcount == 0) { 4112 /* Remove the object from the RTLD_GLOBAL list. */ 4113 objlist_remove(&list_global, root); 4114 4115 /* Remove the object from all objects' DAG lists. */ 4116 STAILQ_FOREACH(elm, &root->dagmembers, link) { 4117 objlist_remove(&elm->obj->dldags, root); 4118 if (elm->obj != root) 4119 unlink_object(elm->obj); 4120 } 4121 } 4122 } 4123 4124 static void 4125 ref_dag(Obj_Entry *root) 4126 { 4127 Objlist_Entry *elm; 4128 4129 assert(root->dag_inited); 4130 STAILQ_FOREACH(elm, &root->dagmembers, link) 4131 elm->obj->refcount++; 4132 } 4133 4134 static void 4135 unref_dag(Obj_Entry *root) 4136 { 4137 Objlist_Entry *elm; 4138 4139 assert(root->dag_inited); 4140 STAILQ_FOREACH(elm, &root->dagmembers, link) 4141 elm->obj->refcount--; 4142 } 4143 4144 /* 4145 * Common code for MD __tls_get_addr(). 
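 *
 * The dtv handed in by the machine-dependent stubs is laid out as
 * dtv[0] = generation count, dtv[1] = number of module slots, and
 * dtv[index + 1] = base address of the TLS block of the module with
 * that tls index (0 until the block is allocated).  tls_get_addr_common()
 * is the fast path; it only drops into tls_get_addr_slow() when the
 * generation is stale or the slot is still empty.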
4146 */ 4147 static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline; 4148 static void * 4149 tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset) 4150 { 4151 Elf_Addr *newdtv, *dtv; 4152 RtldLockState lockstate; 4153 int to_copy; 4154 4155 dtv = *dtvp; 4156 /* Check dtv generation in case new modules have arrived */ 4157 if (dtv[0] != tls_dtv_generation) { 4158 wlock_acquire(rtld_bind_lock, &lockstate); 4159 newdtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr)); 4160 to_copy = dtv[1]; 4161 if (to_copy > tls_max_index) 4162 to_copy = tls_max_index; 4163 memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr)); 4164 newdtv[0] = tls_dtv_generation; 4165 newdtv[1] = tls_max_index; 4166 free(dtv); 4167 lock_release(rtld_bind_lock, &lockstate); 4168 dtv = *dtvp = newdtv; 4169 } 4170 4171 /* Dynamically allocate module TLS if necessary */ 4172 if (dtv[index + 1] == 0) { 4173 /* Signal safe, wlock will block out signals. */ 4174 wlock_acquire(rtld_bind_lock, &lockstate); 4175 if (!dtv[index + 1]) 4176 dtv[index + 1] = (Elf_Addr)allocate_module_tls(index); 4177 lock_release(rtld_bind_lock, &lockstate); 4178 } 4179 return ((void *)(dtv[index + 1] + offset)); 4180 } 4181 4182 void * 4183 tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset) 4184 { 4185 Elf_Addr *dtv; 4186 4187 dtv = *dtvp; 4188 /* Check dtv generation in case new modules have arrived */ 4189 if (__predict_true(dtv[0] == tls_dtv_generation && 4190 dtv[index + 1] != 0)) 4191 return ((void *)(dtv[index + 1] + offset)); 4192 return (tls_get_addr_slow(dtvp, index, offset)); 4193 } 4194 4195 #if defined(__arm__) || defined(__ia64__) || defined(__mips__) || defined(__powerpc__) 4196 4197 /* 4198 * Allocate Static TLS using the Variant I method. 4199 */ 4200 void * 4201 allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign) 4202 { 4203 Obj_Entry *obj; 4204 char *tcb; 4205 Elf_Addr **tls; 4206 Elf_Addr *dtv; 4207 Elf_Addr addr; 4208 int i; 4209 4210 if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE) 4211 return (oldtcb); 4212 4213 assert(tcbsize >= TLS_TCB_SIZE); 4214 tcb = xcalloc(1, tls_static_space - TLS_TCB_SIZE + tcbsize); 4215 tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE); 4216 4217 if (oldtcb != NULL) { 4218 memcpy(tls, oldtcb, tls_static_space); 4219 free(oldtcb); 4220 4221 /* Adjust the DTV. 
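 * Entries that pointed into the old static TLS area are rebased to the
 * new allocation; entries for blocks handed out dynamically by
 * tls_get_addr_slow() lie outside the old range and are left alone.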
*/ 4222 dtv = tls[0]; 4223 for (i = 0; i < dtv[1]; i++) { 4224 if (dtv[i+2] >= (Elf_Addr)oldtcb && 4225 dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) { 4226 dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tls; 4227 } 4228 } 4229 } else { 4230 dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr)); 4231 tls[0] = dtv; 4232 dtv[0] = tls_dtv_generation; 4233 dtv[1] = tls_max_index; 4234 4235 for (obj = objs; obj; obj = obj->next) { 4236 if (obj->tlsoffset > 0) { 4237 addr = (Elf_Addr)tls + obj->tlsoffset; 4238 if (obj->tlsinitsize > 0) 4239 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize); 4240 if (obj->tlssize > obj->tlsinitsize) 4241 memset((void*) (addr + obj->tlsinitsize), 0, 4242 obj->tlssize - obj->tlsinitsize); 4243 dtv[obj->tlsindex + 1] = addr; 4244 } 4245 } 4246 } 4247 4248 return (tcb); 4249 } 4250 4251 void 4252 free_tls(void *tcb, size_t tcbsize, size_t tcbalign) 4253 { 4254 Elf_Addr *dtv; 4255 Elf_Addr tlsstart, tlsend; 4256 int dtvsize, i; 4257 4258 assert(tcbsize >= TLS_TCB_SIZE); 4259 4260 tlsstart = (Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE; 4261 tlsend = tlsstart + tls_static_space; 4262 4263 dtv = *(Elf_Addr **)tlsstart; 4264 dtvsize = dtv[1]; 4265 for (i = 0; i < dtvsize; i++) { 4266 if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) { 4267 free((void*)dtv[i+2]); 4268 } 4269 } 4270 free(dtv); 4271 free(tcb); 4272 } 4273 4274 #endif 4275 4276 #if defined(__i386__) || defined(__amd64__) || defined(__sparc64__) 4277 4278 /* 4279 * Allocate Static TLS using the Variant II method. 4280 */ 4281 void * 4282 allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign) 4283 { 4284 Obj_Entry *obj; 4285 size_t size, ralign; 4286 char *tls; 4287 Elf_Addr *dtv, *olddtv; 4288 Elf_Addr segbase, oldsegbase, addr; 4289 int i; 4290 4291 ralign = tcbalign; 4292 if (tls_static_max_align > ralign) 4293 ralign = tls_static_max_align; 4294 size = round(tls_static_space, ralign) + round(tcbsize, ralign); 4295 4296 assert(tcbsize >= 2*sizeof(Elf_Addr)); 4297 tls = malloc_aligned(size, ralign); 4298 dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr)); 4299 4300 segbase = (Elf_Addr)(tls + round(tls_static_space, ralign)); 4301 ((Elf_Addr*)segbase)[0] = segbase; 4302 ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv; 4303 4304 dtv[0] = tls_dtv_generation; 4305 dtv[1] = tls_max_index; 4306 4307 if (oldtls) { 4308 /* 4309 * Copy the static TLS block over whole. 4310 */ 4311 oldsegbase = (Elf_Addr) oldtls; 4312 memcpy((void *)(segbase - tls_static_space), 4313 (const void *)(oldsegbase - tls_static_space), 4314 tls_static_space); 4315 4316 /* 4317 * If any dynamic TLS blocks have been created tls_get_addr(), 4318 * move them over. 4319 */ 4320 olddtv = ((Elf_Addr**)oldsegbase)[1]; 4321 for (i = 0; i < olddtv[1]; i++) { 4322 if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) { 4323 dtv[i+2] = olddtv[i+2]; 4324 olddtv[i+2] = 0; 4325 } 4326 } 4327 4328 /* 4329 * We assume that this block was the one we created with 4330 * allocate_initial_tls(). 
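 * It is therefore freed with the minimal TCB size of two words.  The
 * dynamic blocks recorded in the old dtv were moved to the new dtv
 * (and zeroed in the old one) just above, so free_tls() will not free
 * them a second time.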
4331 */ 4332 free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr)); 4333 } else { 4334 for (obj = objs; obj; obj = obj->next) { 4335 if (obj->tlsoffset) { 4336 addr = segbase - obj->tlsoffset; 4337 memset((void*) (addr + obj->tlsinitsize), 4338 0, obj->tlssize - obj->tlsinitsize); 4339 if (obj->tlsinit) 4340 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize); 4341 dtv[obj->tlsindex + 1] = addr; 4342 } 4343 } 4344 } 4345 4346 return (void*) segbase; 4347 } 4348 4349 void 4350 free_tls(void *tls, size_t tcbsize, size_t tcbalign) 4351 { 4352 Elf_Addr* dtv; 4353 size_t size, ralign; 4354 int dtvsize, i; 4355 Elf_Addr tlsstart, tlsend; 4356 4357 /* 4358 * Figure out the size of the initial TLS block so that we can 4359 * find stuff which ___tls_get_addr() allocated dynamically. 4360 */ 4361 ralign = tcbalign; 4362 if (tls_static_max_align > ralign) 4363 ralign = tls_static_max_align; 4364 size = round(tls_static_space, ralign); 4365 4366 dtv = ((Elf_Addr**)tls)[1]; 4367 dtvsize = dtv[1]; 4368 tlsend = (Elf_Addr) tls; 4369 tlsstart = tlsend - size; 4370 for (i = 0; i < dtvsize; i++) { 4371 if (dtv[i + 2] != 0 && (dtv[i + 2] < tlsstart || dtv[i + 2] > tlsend)) { 4372 free_aligned((void *)dtv[i + 2]); 4373 } 4374 } 4375 4376 free_aligned((void *)tlsstart); 4377 free((void*) dtv); 4378 } 4379 4380 #endif 4381 4382 /* 4383 * Allocate TLS block for module with given index. 4384 */ 4385 void * 4386 allocate_module_tls(int index) 4387 { 4388 Obj_Entry* obj; 4389 char* p; 4390 4391 for (obj = obj_list; obj; obj = obj->next) { 4392 if (obj->tlsindex == index) 4393 break; 4394 } 4395 if (!obj) { 4396 _rtld_error("Can't find module with TLS index %d", index); 4397 die(); 4398 } 4399 4400 p = malloc_aligned(obj->tlssize, obj->tlsalign); 4401 memcpy(p, obj->tlsinit, obj->tlsinitsize); 4402 memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize); 4403 4404 return p; 4405 } 4406 4407 bool 4408 allocate_tls_offset(Obj_Entry *obj) 4409 { 4410 size_t off; 4411 4412 if (obj->tls_done) 4413 return true; 4414 4415 if (obj->tlssize == 0) { 4416 obj->tls_done = true; 4417 return true; 4418 } 4419 4420 if (obj->tlsindex == 1) 4421 off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign); 4422 else 4423 off = calculate_tls_offset(tls_last_offset, tls_last_size, 4424 obj->tlssize, obj->tlsalign); 4425 4426 /* 4427 * If we have already fixed the size of the static TLS block, we 4428 * must stay within that size. When allocating the static TLS, we 4429 * leave a small amount of space spare to be used for dynamically 4430 * loading modules which use static TLS. 4431 */ 4432 if (tls_static_space != 0) { 4433 if (calculate_tls_end(off, obj->tlssize) > tls_static_space) 4434 return false; 4435 } else if (obj->tlsalign > tls_static_max_align) { 4436 tls_static_max_align = obj->tlsalign; 4437 } 4438 4439 tls_last_offset = obj->tlsoffset = off; 4440 tls_last_size = obj->tlssize; 4441 obj->tls_done = true; 4442 4443 return true; 4444 } 4445 4446 void 4447 free_tls_offset(Obj_Entry *obj) 4448 { 4449 4450 /* 4451 * If we were the last thing to allocate out of the static TLS 4452 * block, we give our space back to the 'allocator'. This is a 4453 * simplistic workaround to allow libGL.so.1 to be loaded and 4454 * unloaded multiple times. 
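 * Only an object whose block ends exactly where the most recent
 * allocation ended gets its space back; anything freed out of order
 * simply leaves a hole in the static block.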
4455 */ 4456 if (calculate_tls_end(obj->tlsoffset, obj->tlssize) 4457 == calculate_tls_end(tls_last_offset, tls_last_size)) { 4458 tls_last_offset -= obj->tlssize; 4459 tls_last_size = 0; 4460 } 4461 } 4462 4463 void * 4464 _rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign) 4465 { 4466 void *ret; 4467 RtldLockState lockstate; 4468 4469 wlock_acquire(rtld_bind_lock, &lockstate); 4470 ret = allocate_tls(obj_list, oldtls, tcbsize, tcbalign); 4471 lock_release(rtld_bind_lock, &lockstate); 4472 return (ret); 4473 } 4474 4475 void 4476 _rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign) 4477 { 4478 RtldLockState lockstate; 4479 4480 wlock_acquire(rtld_bind_lock, &lockstate); 4481 free_tls(tcb, tcbsize, tcbalign); 4482 lock_release(rtld_bind_lock, &lockstate); 4483 } 4484 4485 static void 4486 object_add_name(Obj_Entry *obj, const char *name) 4487 { 4488 Name_Entry *entry; 4489 size_t len; 4490 4491 len = strlen(name); 4492 entry = malloc(sizeof(Name_Entry) + len); 4493 4494 if (entry != NULL) { 4495 strcpy(entry->name, name); 4496 STAILQ_INSERT_TAIL(&obj->names, entry, link); 4497 } 4498 } 4499 4500 static int 4501 object_match_name(const Obj_Entry *obj, const char *name) 4502 { 4503 Name_Entry *entry; 4504 4505 STAILQ_FOREACH(entry, &obj->names, link) { 4506 if (strcmp(name, entry->name) == 0) 4507 return (1); 4508 } 4509 return (0); 4510 } 4511 4512 static Obj_Entry * 4513 locate_dependency(const Obj_Entry *obj, const char *name) 4514 { 4515 const Objlist_Entry *entry; 4516 const Needed_Entry *needed; 4517 4518 STAILQ_FOREACH(entry, &list_main, link) { 4519 if (object_match_name(entry->obj, name)) 4520 return entry->obj; 4521 } 4522 4523 for (needed = obj->needed; needed != NULL; needed = needed->next) { 4524 if (strcmp(obj->strtab + needed->name, name) == 0 || 4525 (needed->obj != NULL && object_match_name(needed->obj, name))) { 4526 /* 4527 * If there is DT_NEEDED for the name we are looking for, 4528 * we are all set. Note that object might not be found if 4529 * dependency was not loaded yet, so the function can 4530 * return NULL here. This is expected and handled 4531 * properly by the caller. 
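 * rtld_verify_object_versions() is such a caller: it checks for a NULL
 * return and fails the version verification instead of dereferencing it.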
4532 */ 4533 return (needed->obj); 4534 } 4535 } 4536 _rtld_error("%s: Unexpected inconsistency: dependency %s not found", 4537 obj->path, name); 4538 die(); 4539 } 4540 4541 static int 4542 check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj, 4543 const Elf_Vernaux *vna) 4544 { 4545 const Elf_Verdef *vd; 4546 const char *vername; 4547 4548 vername = refobj->strtab + vna->vna_name; 4549 vd = depobj->verdef; 4550 if (vd == NULL) { 4551 _rtld_error("%s: version %s required by %s not defined", 4552 depobj->path, vername, refobj->path); 4553 return (-1); 4554 } 4555 for (;;) { 4556 if (vd->vd_version != VER_DEF_CURRENT) { 4557 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry", 4558 depobj->path, vd->vd_version); 4559 return (-1); 4560 } 4561 if (vna->vna_hash == vd->vd_hash) { 4562 const Elf_Verdaux *aux = (const Elf_Verdaux *) 4563 ((char *)vd + vd->vd_aux); 4564 if (strcmp(vername, depobj->strtab + aux->vda_name) == 0) 4565 return (0); 4566 } 4567 if (vd->vd_next == 0) 4568 break; 4569 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next); 4570 } 4571 if (vna->vna_flags & VER_FLG_WEAK) 4572 return (0); 4573 _rtld_error("%s: version %s required by %s not found", 4574 depobj->path, vername, refobj->path); 4575 return (-1); 4576 } 4577 4578 static int 4579 rtld_verify_object_versions(Obj_Entry *obj) 4580 { 4581 const Elf_Verneed *vn; 4582 const Elf_Verdef *vd; 4583 const Elf_Verdaux *vda; 4584 const Elf_Vernaux *vna; 4585 const Obj_Entry *depobj; 4586 int maxvernum, vernum; 4587 4588 if (obj->ver_checked) 4589 return (0); 4590 obj->ver_checked = true; 4591 4592 maxvernum = 0; 4593 /* 4594 * Walk over defined and required version records and figure out 4595 * max index used by any of them. Do very basic sanity checking 4596 * while there. 4597 */ 4598 vn = obj->verneed; 4599 while (vn != NULL) { 4600 if (vn->vn_version != VER_NEED_CURRENT) { 4601 _rtld_error("%s: Unsupported version %d of Elf_Verneed entry", 4602 obj->path, vn->vn_version); 4603 return (-1); 4604 } 4605 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux); 4606 for (;;) { 4607 vernum = VER_NEED_IDX(vna->vna_other); 4608 if (vernum > maxvernum) 4609 maxvernum = vernum; 4610 if (vna->vna_next == 0) 4611 break; 4612 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next); 4613 } 4614 if (vn->vn_next == 0) 4615 break; 4616 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next); 4617 } 4618 4619 vd = obj->verdef; 4620 while (vd != NULL) { 4621 if (vd->vd_version != VER_DEF_CURRENT) { 4622 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry", 4623 obj->path, vd->vd_version); 4624 return (-1); 4625 } 4626 vernum = VER_DEF_IDX(vd->vd_ndx); 4627 if (vernum > maxvernum) 4628 maxvernum = vernum; 4629 if (vd->vd_next == 0) 4630 break; 4631 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next); 4632 } 4633 4634 if (maxvernum == 0) 4635 return (0); 4636 4637 /* 4638 * Store version information in array indexable by version index. 4639 * Verify that object version requirements are satisfied along the 4640 * way. 
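 * vertab[0] and vertab[1] are normally left zeroed (they correspond to
 * the unversioned local/global indices); every defined version except
 * the VER_FLG_BASE entry and every needed version is recorded at its
 * own index with its hash, name, optional file and hidden flag, so
 * that matched_symbol() and fetch_ventry() can map a versym index
 * straight to a Ver_Entry.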
4641 */ 4642 obj->vernum = maxvernum + 1; 4643 obj->vertab = xcalloc(obj->vernum, sizeof(Ver_Entry)); 4644 4645 vd = obj->verdef; 4646 while (vd != NULL) { 4647 if ((vd->vd_flags & VER_FLG_BASE) == 0) { 4648 vernum = VER_DEF_IDX(vd->vd_ndx); 4649 assert(vernum <= maxvernum); 4650 vda = (const Elf_Verdaux *)((char *)vd + vd->vd_aux); 4651 obj->vertab[vernum].hash = vd->vd_hash; 4652 obj->vertab[vernum].name = obj->strtab + vda->vda_name; 4653 obj->vertab[vernum].file = NULL; 4654 obj->vertab[vernum].flags = 0; 4655 } 4656 if (vd->vd_next == 0) 4657 break; 4658 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next); 4659 } 4660 4661 vn = obj->verneed; 4662 while (vn != NULL) { 4663 depobj = locate_dependency(obj, obj->strtab + vn->vn_file); 4664 if (depobj == NULL) 4665 return (-1); 4666 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux); 4667 for (;;) { 4668 if (check_object_provided_version(obj, depobj, vna)) 4669 return (-1); 4670 vernum = VER_NEED_IDX(vna->vna_other); 4671 assert(vernum <= maxvernum); 4672 obj->vertab[vernum].hash = vna->vna_hash; 4673 obj->vertab[vernum].name = obj->strtab + vna->vna_name; 4674 obj->vertab[vernum].file = obj->strtab + vn->vn_file; 4675 obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ? 4676 VER_INFO_HIDDEN : 0; 4677 if (vna->vna_next == 0) 4678 break; 4679 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next); 4680 } 4681 if (vn->vn_next == 0) 4682 break; 4683 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next); 4684 } 4685 return 0; 4686 } 4687 4688 static int 4689 rtld_verify_versions(const Objlist *objlist) 4690 { 4691 Objlist_Entry *entry; 4692 int rc; 4693 4694 rc = 0; 4695 STAILQ_FOREACH(entry, objlist, link) { 4696 /* 4697 * Skip dummy objects or objects that have their version requirements 4698 * already checked. 
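 * (rtld_verify_object_versions() allocates vertab when it runs, so a
 * non-NULL vertab identifies an object that has already been verified.)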
4699 */ 4700 if (entry->obj->strtab == NULL || entry->obj->vertab != NULL) 4701 continue; 4702 if (rtld_verify_object_versions(entry->obj) == -1) { 4703 rc = -1; 4704 if (ld_tracing == NULL) 4705 break; 4706 } 4707 } 4708 if (rc == 0 || ld_tracing != NULL) 4709 rc = rtld_verify_object_versions(&obj_rtld); 4710 return rc; 4711 } 4712 4713 const Ver_Entry * 4714 fetch_ventry(const Obj_Entry *obj, unsigned long symnum) 4715 { 4716 Elf_Versym vernum; 4717 4718 if (obj->vertab) { 4719 vernum = VER_NDX(obj->versyms[symnum]); 4720 if (vernum >= obj->vernum) { 4721 _rtld_error("%s: symbol %s has wrong verneed value %d", 4722 obj->path, obj->strtab + symnum, vernum); 4723 } else if (obj->vertab[vernum].hash != 0) { 4724 return &obj->vertab[vernum]; 4725 } 4726 } 4727 return NULL; 4728 } 4729 4730 int 4731 _rtld_get_stack_prot(void) 4732 { 4733 4734 return (stack_prot); 4735 } 4736 4737 static void 4738 map_stacks_exec(RtldLockState *lockstate) 4739 { 4740 void (*thr_map_stacks_exec)(void); 4741 4742 if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0) 4743 return; 4744 thr_map_stacks_exec = (void (*)(void))(uintptr_t) 4745 get_program_var_addr("__pthread_map_stacks_exec", lockstate); 4746 if (thr_map_stacks_exec != NULL) { 4747 stack_prot |= PROT_EXEC; 4748 thr_map_stacks_exec(); 4749 } 4750 } 4751 4752 void 4753 symlook_init(SymLook *dst, const char *name) 4754 { 4755 4756 bzero(dst, sizeof(*dst)); 4757 dst->name = name; 4758 dst->hash = elf_hash(name); 4759 dst->hash_gnu = gnu_hash(name); 4760 } 4761 4762 static void 4763 symlook_init_from_req(SymLook *dst, const SymLook *src) 4764 { 4765 4766 dst->name = src->name; 4767 dst->hash = src->hash; 4768 dst->hash_gnu = src->hash_gnu; 4769 dst->ventry = src->ventry; 4770 dst->flags = src->flags; 4771 dst->defobj_out = NULL; 4772 dst->sym_out = NULL; 4773 dst->lockstate = src->lockstate; 4774 } 4775 4776 /* 4777 * Overrides for libc_pic-provided functions. 4778 */ 4779 4780 int 4781 __getosreldate(void) 4782 { 4783 size_t len; 4784 int oid[2]; 4785 int error, osrel; 4786 4787 if (osreldate != 0) 4788 return (osreldate); 4789 4790 oid[0] = CTL_KERN; 4791 oid[1] = KERN_OSRELDATE; 4792 osrel = 0; 4793 len = sizeof(osrel); 4794 error = sysctl(oid, 2, &osrel, &len, NULL, 0); 4795 if (error == 0 && osrel > 0 && len == sizeof(osrel)) 4796 osreldate = osrel; 4797 return (osreldate); 4798 } 4799 4800 void 4801 exit(int status) 4802 { 4803 4804 _exit(status); 4805 } 4806 4807 void (*__cleanup)(void); 4808 int __isthreaded = 0; 4809 int _thread_autoinit_dummy_decl = 1; 4810 4811 /* 4812 * No unresolved symbols for rtld. 4813 */ 4814 void 4815 __pthread_cxa_finalize(struct dl_phdr_info *a) 4816 { 4817 } 4818 4819 void 4820 __stack_chk_fail(void) 4821 { 4822 4823 _rtld_error("stack overflow detected; terminated"); 4824 die(); 4825 } 4826 __weak_reference(__stack_chk_fail, __stack_chk_fail_local); 4827 4828 void 4829 __chk_fail(void) 4830 { 4831 4832 _rtld_error("buffer overflow detected; terminated"); 4833 die(); 4834 } 4835 4836 const char * 4837 rtld_strerror(int errnum) 4838 { 4839 4840 if (errnum < 0 || errnum >= sys_nerr) 4841 return ("Unknown error"); 4842 return (sys_errlist[errnum]); 4843 } 4844
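
#if 0
/*
 * Illustrative sketch only, never compiled: roughly how a
 * machine-dependent __tls_get_addr() stub is expected to funnel into
 * tls_get_addr_common() above.  The tls_index layout follows the usual
 * ELF TLS ABI; how the address of the current thread's dtv pointer is
 * obtained is architecture specific, so the accessor used here is
 * purely hypothetical.
 */
typedef struct {
	unsigned long ti_module;	/* the module's tls index (obj->tlsindex) */
	unsigned long ti_offset;	/* offset of the variable within the block */
} example_tls_index;

extern Elf_Addr **example_get_dtv_slot(void);	/* hypothetical MD accessor */

void *
example_tls_get_addr(example_tls_index *ti)
{

	return (tls_get_addr_common(example_get_dtv_slot(), ti->ti_module,
	    ti->ti_offset));
}
#endif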