/*-
 * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
 * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
 * Copyright 2009-2012 Konstantin Belousov <kib@FreeBSD.ORG>.
 * Copyright 2012 John Marino <draco@marino.st>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/ktrace.h>

#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"
#include "libmap.h"
#include "paths.h"
#include "rtld_tls.h"
#include "rtld_printf.h"
#include "rtld_utrace.h"
#include "notes.h"

/* Types. */
typedef void (*func_ptr_type)();
typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);

/*
 * Function declarations.
 */
static const char *basename(const char *);
static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
    const Elf_Dyn **, const Elf_Dyn **);
static void digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
    const Elf_Dyn *);
static void digest_dynamic(Obj_Entry *, int);
static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
static Obj_Entry *dlcheck(void *);
static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
    int lo_flags, int mode, RtldLockState *lockstate);
static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
static bool donelist_check(DoneList *, const Obj_Entry *);
static void errmsg_restore(char *);
static char *errmsg_save(void);
static void *fill_search_info(const char *, size_t, void *);
static char *find_library(const char *, const Obj_Entry *, int *);
static const char *gethints(bool);
static void hold_object(Obj_Entry *);
static void unhold_object(Obj_Entry *);
static void init_dag(Obj_Entry *);
static void init_marker(Obj_Entry *);
static void init_pagesizes(Elf_Auxinfo **aux_info);
static void init_rtld(caddr_t, Elf_Auxinfo **);
static void initlist_add_neededs(Needed_Entry *, Objlist *);
static void initlist_add_objects(Obj_Entry *, Obj_Entry *, Objlist *);
static void linkmap_add(Obj_Entry *);
static void linkmap_delete(Obj_Entry *);
static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
static void unload_filtees(Obj_Entry *);
static int load_needed_objects(Obj_Entry *, int);
static int load_preload_objects(void);
static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int);
static void map_stacks_exec(RtldLockState *);
static int obj_enforce_relro(Obj_Entry *);
static Obj_Entry *obj_from_addr(const void *);
static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
static void objlist_call_init(Objlist *, RtldLockState *);
static void objlist_clear(Objlist *);
static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
static void objlist_init(Objlist *);
static void objlist_push_head(Objlist *, Obj_Entry *);
static void objlist_push_tail(Objlist *, Obj_Entry *);
static void objlist_put_after(Objlist *, Obj_Entry *, Obj_Entry *);
static void objlist_remove(Objlist *, Obj_Entry *);
static int parse_libdir(const char *);
static void *path_enumerate(const char *, path_enum_proc, void *);
static void release_object(Obj_Entry *);
static int relocate_object_dag(Obj_Entry *root, bool bind_now,
    Obj_Entry *rtldobj, int flags, RtldLockState *lockstate);
static int relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
    int flags, RtldLockState *lockstate);
static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, int,
    RtldLockState *);
static int resolve_objects_ifunc(Obj_Entry *first, bool bind_now,
    int flags, RtldLockState *lockstate);
static int rtld_dirname(const char *, char *);
static int rtld_dirname_abs(const char *, char *);
static void *rtld_dlopen(const char *name, int fd, int mode);
static void rtld_exit(void);
static char *search_library_path(const char *, const char *);
static char *search_library_pathfds(const char *, const char *, int *);
static const void **get_program_var_addr(const char *, RtldLockState *);
static void set_program_var(const char *, const void *);
static int symlook_default(SymLook *, const Obj_Entry *refobj);
static int symlook_global(SymLook *, DoneList *);
static void symlook_init_from_req(SymLook *, const SymLook *);
static int symlook_list(SymLook *, const Objlist *, DoneList *);
static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
static int symlook_obj1_sysv(SymLook *, const Obj_Entry *);
static int symlook_obj1_gnu(SymLook *, const Obj_Entry *);
static void trace_loaded_objects(Obj_Entry *);
static void unlink_object(Obj_Entry *);
static void unload_object(Obj_Entry *);
static void unref_dag(Obj_Entry *);
static void ref_dag(Obj_Entry *);
static char *origin_subst_one(Obj_Entry *, char *, const char *,
    const char *, bool);
static char *origin_subst(Obj_Entry *, char *);
static bool obj_resolve_origin(Obj_Entry *obj);
static void preinit_main(void);
static int rtld_verify_versions(const Objlist *);
static int rtld_verify_object_versions(Obj_Entry *);
static void object_add_name(Obj_Entry *, const char *);
static int object_match_name(const Obj_Entry *, const char *);
static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
    struct dl_phdr_info *phdr_info);
static uint32_t gnu_hash(const char *);
static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *,
    const unsigned long);

void r_debug_state(struct r_debug *, struct link_map *) __noinline __exported;
void _r_debug_postinit(struct link_map *) __noinline __exported;

int __sys_openat(int, const char *, int, ...);

/*
 * Data declarations.
 */
static char *error_message;     /* Message for dlerror(), or NULL */
struct r_debug r_debug __exported;      /* for GDB; */
static bool libmap_disable;     /* Disable libmap */
static bool ld_loadfltr;        /* Immediate filters processing */
static char *libmap_override;   /* Maps to use in addition to libmap.conf */
static bool trust;              /* False for setuid and setgid programs */
static bool dangerous_ld_env;   /* True if environment variables have been
                                   used to affect the libraries loaded */
static char *ld_bind_now;       /* Environment variable for immediate binding */
static char *ld_debug;          /* Environment variable for debugging */
static char *ld_library_path;   /* Environment variable for search path */
static char *ld_library_dirs;   /* Environment variable for library descriptors */
static char *ld_preload;        /* Environment variable for libraries to
                                   load first */
static char *ld_elf_hints_path; /* Environment variable for alternative hints path */
static char *ld_tracing;        /* Called from ldd to print libs */
static char *ld_utrace;         /* Use utrace() to log events. */
static struct obj_entry_q obj_list;     /* Queue of all loaded objects */
static Obj_Entry *obj_main;     /* The main program shared object */
static Obj_Entry obj_rtld;      /* The dynamic linker shared object */
static unsigned int obj_count;  /* Number of objects in obj_list */
static unsigned int obj_loads;  /* Number of loads of objects (gen count) */

static Objlist list_global =    /* Objects dlopened with RTLD_GLOBAL */
    STAILQ_HEAD_INITIALIZER(list_global);
static Objlist list_main =      /* Objects loaded at program startup */
    STAILQ_HEAD_INITIALIZER(list_main);
static Objlist list_fini =      /* Objects needing fini() calls */
    STAILQ_HEAD_INITIALIZER(list_fini);

Elf_Sym sym_zero;               /* For resolving undefined weak refs. */

#define GDB_STATE(s,m)  r_debug.r_state = s; r_debug_state(&r_debug,m);

extern Elf_Dyn _DYNAMIC;
#pragma weak _DYNAMIC

int dlclose(void *) __exported;
char *dlerror(void) __exported;
void *dlopen(const char *, int) __exported;
void *fdlopen(int, int) __exported;
void *dlsym(void *, const char *) __exported;
dlfunc_t dlfunc(void *, const char *) __exported;
void *dlvsym(void *, const char *, const char *) __exported;
int dladdr(const void *, Dl_info *) __exported;
void dllockinit(void *, void *(*)(void *), void (*)(void *), void (*)(void *),
    void (*)(void *), void (*)(void *), void (*)(void *)) __exported;
int dlinfo(void *, int , void *) __exported;
int dl_iterate_phdr(__dl_iterate_hdr_callback, void *) __exported;
int _rtld_addr_phdr(const void *, struct dl_phdr_info *) __exported;
int _rtld_get_stack_prot(void) __exported;
int _rtld_is_dlopened(void *) __exported;
void _rtld_error(const char *, ...) __exported;

int npagesizes, osreldate;
size_t *pagesizes;

long __stack_chk_guard[8] = {0, 0, 0, 0, 0, 0, 0, 0};

static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
static int max_stack_flags;

/*
 * Global declarations normally provided by crt1. The dynamic linker is
 * not built with crt1, so we have to provide them ourselves.
 */
char *__progname;
char **environ;

/*
 * Used to pass argc, argv to init functions.
 */
int main_argc;
char **main_argv;

/*
 * Globals to control TLS allocation.
 */
size_t tls_last_offset;         /* Static TLS offset of last module */
size_t tls_last_size;           /* Static TLS size of last module */
size_t tls_static_space;        /* Static TLS space allocated */
size_t tls_static_max_align;
int tls_dtv_generation = 1;     /* Used to detect when dtv size changes */
int tls_max_index = 1;          /* Largest module index allocated */

bool ld_library_path_rpath = false;

/*
 * Globals for path names, and such
 */
char *ld_elf_hints_default = _PATH_ELF_HINTS;
char *ld_path_libmap_conf = _PATH_LIBMAP_CONF;
char *ld_path_rtld = _PATH_RTLD;
char *ld_standard_library_path = STANDARD_LIBRARY_PATH;
char *ld_env_prefix = LD_;

/*
 * Fill in a DoneList with an allocation large enough to hold all of
 * the currently-loaded objects. Keep this as a macro since it calls
 * alloca and we want that to occur within the scope of the caller.
 */
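/*
 * Illustrative usage sketch (added commentary, not part of the original
 * source): a caller typically builds the DoneList in its own stack frame
 * and then filters objects with donelist_check(), e.g.
 *
 *	DoneList donelist;
 *	donelist_init(&donelist);
 *	if (!donelist_check(&donelist, obj))
 *		... visit obj exactly once ...
 */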
#define donelist_init(dlp)                                      \
    ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]),   \
    assert((dlp)->objs != NULL),                                \
    (dlp)->num_alloc = obj_count,                               \
    (dlp)->num_used = 0)

#define LD_UTRACE(e, h, mb, ms, r, n) do {              \
        if (ld_utrace != NULL)                          \
                ld_utrace_log(e, h, mb, ms, r, n);      \
} while (0)

static void
ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
    int refcnt, const char *name)
{
    struct utrace_rtld ut;
    static const char rtld_utrace_sig[RTLD_UTRACE_SIG_SZ] = RTLD_UTRACE_SIG;

    memcpy(ut.sig, rtld_utrace_sig, sizeof(ut.sig));
    ut.event = event;
    ut.handle = handle;
    ut.mapbase = mapbase;
    ut.mapsize = mapsize;
    ut.refcnt = refcnt;
    bzero(ut.name, sizeof(ut.name));
    if (name)
        strlcpy(ut.name, name, sizeof(ut.name));
    utrace(&ut, sizeof(ut));
}

#ifdef RTLD_VARIANT_ENV_NAMES
/*
 * Construct the environment variable name based on the type of binary
 * that is running.
 */
static inline const char *
_LD(const char *var)
{
    static char buffer[128];

    strlcpy(buffer, ld_env_prefix, sizeof(buffer));
    strlcat(buffer, var, sizeof(buffer));
    return (buffer);
}
#else
#define _LD(x) LD_ x
#endif

/*
 * Main entry point for dynamic linking. The first argument is the
 * stack pointer. The stack is expected to be laid out as described
 * in the SVR4 ABI specification, Intel 386 Processor Supplement.
 * Specifically, the stack pointer points to a word containing
 * ARGC. Following that in the stack is a null-terminated sequence
 * of pointers to argument strings. Then comes a null-terminated
 * sequence of pointers to environment strings. Finally, there is a
 * sequence of "auxiliary vector" entries.
 *
 * The second argument points to a place to store the dynamic linker's
 * exit procedure pointer and the third to a place to store the main
 * program's object.
 *
 * The return value is the main program's entry point.
 */
func_ptr_type
_rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
{
    Elf_Auxinfo *aux_info[AT_COUNT];
    int i;
    int argc;
    char **argv;
    char **env;
    Elf_Auxinfo *aux;
    Elf_Auxinfo *auxp;
    const char *argv0;
    Objlist_Entry *entry;
    Obj_Entry *obj;
    Obj_Entry *preload_tail;
    Obj_Entry *last_interposer;
    Objlist initlist;
    RtldLockState lockstate;
    char *library_path_rpath;
    int mib[2];
    size_t len;

    /*
     * On entry, the dynamic linker itself has not been relocated yet.
     * Be very careful not to reference any global data until after
     * init_rtld has returned. It is OK to reference file-scope statics
     * and string constants, and to call static and global functions.
     */

    /* Find the auxiliary vector on the stack. */
    argc = *sp++;
    argv = (char **) sp;
    sp += argc + 1;     /* Skip over arguments and NULL terminator */
    env = (char **) sp;
    while (*sp++ != 0)  /* Skip over environment, and NULL terminator */
        ;
    aux = (Elf_Auxinfo *) sp;

    /* Digest the auxiliary vector. */
    for (i = 0; i < AT_COUNT; i++)
        aux_info[i] = NULL;
    for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
        if (auxp->a_type < AT_COUNT)
            aux_info[auxp->a_type] = auxp;
    }

    /* Initialize and relocate ourselves. */
    assert(aux_info[AT_BASE] != NULL);
    init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);

    __progname = obj_rtld.path;
    argv0 = argv[0] != NULL ? argv[0] : "(null)";
    environ = env;
    main_argc = argc;
    main_argv = argv;

    if (aux_info[AT_CANARY] != NULL &&
        aux_info[AT_CANARY]->a_un.a_ptr != NULL) {
        i = aux_info[AT_CANARYLEN]->a_un.a_val;
        if (i > sizeof(__stack_chk_guard))
            i = sizeof(__stack_chk_guard);
        memcpy(__stack_chk_guard, aux_info[AT_CANARY]->a_un.a_ptr, i);
    } else {
        mib[0] = CTL_KERN;
        mib[1] = KERN_ARND;

        len = sizeof(__stack_chk_guard);
        if (sysctl(mib, 2, __stack_chk_guard, &len, NULL, 0) == -1 ||
            len != sizeof(__stack_chk_guard)) {
            /* If sysctl was unsuccessful, use the "terminator canary". */
            ((unsigned char *)(void *)__stack_chk_guard)[0] = 0;
            ((unsigned char *)(void *)__stack_chk_guard)[1] = 0;
            ((unsigned char *)(void *)__stack_chk_guard)[2] = '\n';
            ((unsigned char *)(void *)__stack_chk_guard)[3] = 255;
        }
    }

    trust = !issetugid();

    md_abi_variant_hook(aux_info);

    ld_bind_now = getenv(_LD("BIND_NOW"));
    /*
     * If the process is tainted, then we un-set the dangerous environment
     * variables. The process will be marked as tainted until setuid(2)
     * is called. If any child process calls setuid(2) we do not want any
     * future processes to honor the potentially un-safe variables.
     */
    if (!trust) {
        if (unsetenv(_LD("PRELOAD")) || unsetenv(_LD("LIBMAP")) ||
            unsetenv(_LD("LIBRARY_PATH")) || unsetenv(_LD("LIBRARY_PATH_FDS")) ||
            unsetenv(_LD("LIBMAP_DISABLE")) ||
            unsetenv(_LD("DEBUG")) || unsetenv(_LD("ELF_HINTS_PATH")) ||
            unsetenv(_LD("LOADFLTR")) || unsetenv(_LD("LIBRARY_PATH_RPATH"))) {
            _rtld_error("environment corrupt; aborting");
            rtld_die();
        }
    }
    ld_debug = getenv(_LD("DEBUG"));
    libmap_disable = getenv(_LD("LIBMAP_DISABLE")) != NULL;
    libmap_override = getenv(_LD("LIBMAP"));
    ld_library_path = getenv(_LD("LIBRARY_PATH"));
    ld_library_dirs = getenv(_LD("LIBRARY_PATH_FDS"));
    ld_preload = getenv(_LD("PRELOAD"));
    ld_elf_hints_path = getenv(_LD("ELF_HINTS_PATH"));
    ld_loadfltr = getenv(_LD("LOADFLTR")) != NULL;
    library_path_rpath = getenv(_LD("LIBRARY_PATH_RPATH"));
    if (library_path_rpath != NULL) {
        if (library_path_rpath[0] == 'y' ||
            library_path_rpath[0] == 'Y' ||
            library_path_rpath[0] == '1')
            ld_library_path_rpath = true;
        else
            ld_library_path_rpath = false;
    }
    dangerous_ld_env = libmap_disable || (libmap_override != NULL) ||
        (ld_library_path != NULL) || (ld_preload != NULL) ||
        (ld_elf_hints_path != NULL) || ld_loadfltr;
    ld_tracing = getenv(_LD("TRACE_LOADED_OBJECTS"));
    ld_utrace = getenv(_LD("UTRACE"));

    if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
        ld_elf_hints_path = ld_elf_hints_default;

    if (ld_debug != NULL && *ld_debug != '\0')
        debug = 1;
    dbg("%s is initialized, base address = %p", __progname,
        (caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
    dbg("RTLD dynamic = %p", obj_rtld.dynamic);
    dbg("RTLD pltgot = %p", obj_rtld.pltgot);

    dbg("initializing thread locks");
    lockdflt_init();

    /*
     * Load the main program, or process its program header if it is
     * already loaded.
     */
    if (aux_info[AT_EXECFD] != NULL) {  /* Load the main program. */
        int fd = aux_info[AT_EXECFD]->a_un.a_val;
        dbg("loading main program");
        obj_main = map_object(fd, argv0, NULL);
        close(fd);
        if (obj_main == NULL)
            rtld_die();
        max_stack_flags = obj_main->stack_flags;
    } else {                            /* Main program already loaded. */
        const Elf_Phdr *phdr;
        int phnum;
        caddr_t entry;

        dbg("processing main program's program header");
        assert(aux_info[AT_PHDR] != NULL);
        phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
        assert(aux_info[AT_PHNUM] != NULL);
        phnum = aux_info[AT_PHNUM]->a_un.a_val;
        assert(aux_info[AT_PHENT] != NULL);
        assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
        assert(aux_info[AT_ENTRY] != NULL);
        entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
        if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL)
            rtld_die();
    }

    if (aux_info[AT_EXECPATH] != NULL) {
        char *kexecpath;
        char buf[MAXPATHLEN];

        kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
        dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
        if (kexecpath[0] == '/')
            obj_main->path = kexecpath;
        else if (getcwd(buf, sizeof(buf)) == NULL ||
            strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
            strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
            obj_main->path = xstrdup(argv0);
        else
            obj_main->path = xstrdup(buf);
    } else {
        dbg("No AT_EXECPATH");
        obj_main->path = xstrdup(argv0);
    }
    dbg("obj_main path %s", obj_main->path);
    obj_main->mainprog = true;

    if (aux_info[AT_STACKPROT] != NULL &&
        aux_info[AT_STACKPROT]->a_un.a_val != 0)
        stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;

#ifndef COMPAT_32BIT
    /*
     * Get the actual dynamic linker pathname from the executable if
     * possible. (It should always be possible.) That ensures that
     * gdb will find the right dynamic linker even if a non-standard
     * one is being used.
     */
    if (obj_main->interp != NULL &&
        strcmp(obj_main->interp, obj_rtld.path) != 0) {
        free(obj_rtld.path);
        obj_rtld.path = xstrdup(obj_main->interp);
        __progname = obj_rtld.path;
    }
#endif

    digest_dynamic(obj_main, 0);
    dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d",
        obj_main->path, obj_main->valid_hash_sysv, obj_main->valid_hash_gnu,
        obj_main->dynsymcount);

    linkmap_add(obj_main);
    linkmap_add(&obj_rtld);

    /* Link the main program into the list of objects. */
    TAILQ_INSERT_HEAD(&obj_list, obj_main, next);
    obj_count++;
    obj_loads++;

    /* Initialize a fake symbol for resolving undefined weak references. */
    sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
    sym_zero.st_shndx = SHN_UNDEF;
    sym_zero.st_value = -(uintptr_t)obj_main->relocbase;

    if (!libmap_disable)
        libmap_disable = (bool)lm_init(libmap_override);

    dbg("loading LD_PRELOAD libraries");
    if (load_preload_objects() == -1)
        rtld_die();
    preload_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));

    dbg("loading needed objects");
    if (load_needed_objects(obj_main, 0) == -1)
        rtld_die();

    /* Make a list of all objects loaded at startup. */
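    /*
     * Added commentary (not in the original source): objects marked
     * DF_1_INTERPOSE are queued immediately after the main object (and
     * after any previously seen interposers), so their symbols take
     * precedence over regular dependencies during symbol lookup.
     */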
    last_interposer = obj_main;
    TAILQ_FOREACH(obj, &obj_list, next) {
        if (obj->marker)
            continue;
        if (obj->z_interpose && obj != obj_main) {
            objlist_put_after(&list_main, last_interposer, obj);
            last_interposer = obj;
        } else {
            objlist_push_tail(&list_main, obj);
        }
        obj->refcount++;
    }

    dbg("checking for required versions");
    if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
        rtld_die();

    if (ld_tracing) {           /* We're done */
        trace_loaded_objects(obj_main);
        exit(0);
    }

    if (getenv(_LD("DUMP_REL_PRE")) != NULL) {
        dump_relocations(obj_main);
        exit (0);
    }

    /*
     * Processing tls relocations requires having the tls offsets
     * initialized. Prepare offsets before starting initial
     * relocation processing.
     */
    dbg("initializing initial thread local storage offsets");
    STAILQ_FOREACH(entry, &list_main, link) {
        /*
         * Allocate all the initial objects out of the static TLS
         * block even if they didn't ask for it.
         */
        allocate_tls_offset(entry->obj);
    }

    if (relocate_objects(obj_main,
        ld_bind_now != NULL && *ld_bind_now != '\0',
        &obj_rtld, SYMLOOK_EARLY, NULL) == -1)
        rtld_die();

    dbg("doing copy relocations");
    if (do_copy_relocations(obj_main) == -1)
        rtld_die();

    dbg("enforcing main obj relro");
    if (obj_enforce_relro(obj_main) == -1)
        rtld_die();

    if (getenv(_LD("DUMP_REL_POST")) != NULL) {
        dump_relocations(obj_main);
        exit (0);
    }

    /*
     * Setup TLS for main thread. This must be done after the
     * relocations are processed, since tls initialization section
     * might be the subject for relocations.
     */
    dbg("initializing initial thread local storage");
    allocate_initial_tls(globallist_curr(TAILQ_FIRST(&obj_list)));

    dbg("initializing key program variables");
    set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
    set_program_var("environ", env);
    set_program_var("__elf_aux_vector", aux);

    /* Make a list of init functions to call. */
    objlist_init(&initlist);
    initlist_add_objects(globallist_curr(TAILQ_FIRST(&obj_list)),
        preload_tail, &initlist);

    r_debug_state(NULL, &obj_main->linkmap);    /* say hello to gdb! */

    map_stacks_exec(NULL);
    ifunc_init(aux);

    dbg("resolving ifuncs");
    if (resolve_objects_ifunc(obj_main,
        ld_bind_now != NULL && *ld_bind_now != '\0', SYMLOOK_EARLY,
        NULL) == -1)
        rtld_die();

    if (!obj_main->crt_no_init) {
        /*
         * Make sure we don't call the main program's init and fini
         * functions for binaries linked with old crt1 which calls
         * _init itself.
         */
        obj_main->init = obj_main->fini = (Elf_Addr)NULL;
        obj_main->preinit_array = obj_main->init_array =
            obj_main->fini_array = (Elf_Addr)NULL;
    }

    wlock_acquire(rtld_bind_lock, &lockstate);
    if (obj_main->crt_no_init)
        preinit_main();
    objlist_call_init(&initlist, &lockstate);
    _r_debug_postinit(&obj_main->linkmap);
    objlist_clear(&initlist);
    dbg("loading filtees");
    TAILQ_FOREACH(obj, &obj_list, next) {
        if (obj->marker)
            continue;
        if (ld_loadfltr || obj->z_loadfltr)
            load_filtees(obj, 0, &lockstate);
    }
    lock_release(rtld_bind_lock, &lockstate);

    dbg("transferring control to program entry point = %p", obj_main->entry);

    /* Return the exit procedure and the program entry point. */
    *exit_proc = rtld_exit;
    *objp = obj_main;
    return (func_ptr_type) obj_main->entry;
}

void *
rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
{
    void *ptr;
    Elf_Addr target;

    ptr = (void *)make_function_pointer(def, obj);
    target = call_ifunc_resolver(ptr);
    return ((void *)target);
}

/*
 * NB: MIPS uses a private version of this function (_mips_rtld_bind).
 * Changes to this function should be applied there as well.
 */
Elf_Addr
_rtld_bind(Obj_Entry *obj, Elf_Size reloff)
{
    const Elf_Rel *rel;
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    Elf_Addr *where;
    Elf_Addr target;
    RtldLockState lockstate;

    rlock_acquire(rtld_bind_lock, &lockstate);
    if (sigsetjmp(lockstate.env, 0) != 0)
        lock_upgrade(rtld_bind_lock, &lockstate);
    if (obj->pltrel)
        rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff);
    else
        rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff);

    where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, SYMLOOK_IN_PLT,
        NULL, &lockstate);
    if (def == NULL)
        rtld_die();
    if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
        target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
    else
        target = (Elf_Addr)(defobj->relocbase + def->st_value);

    dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
        defobj->strtab + def->st_name, basename(obj->path),
        (void *)target, basename(defobj->path));

    /*
     * Write the new contents for the jmpslot. Note that depending on
     * architecture, the value which we need to return back to the
     * lazy binding trampoline may or may not be the target
     * address. The value returned from reloc_jmpslot() is the value
     * that the trampoline needs.
     */
    target = reloc_jmpslot(where, target, defobj, obj, rel);
    lock_release(rtld_bind_lock, &lockstate);
    return target;
}

/*
 * Error reporting function. Use it like printf. It formats the message
 * into a buffer, and sets things up so that the next call to dlerror()
 * will return the message.
 */
void
_rtld_error(const char *fmt, ...)
{
    static char buf[512];
    va_list ap;

    va_start(ap, fmt);
    rtld_vsnprintf(buf, sizeof buf, fmt, ap);
    error_message = buf;
    va_end(ap);
    LD_UTRACE(UTRACE_RTLD_ERROR, NULL, NULL, 0, 0, error_message);
}

/*
 * Return a dynamically-allocated copy of the current error message, if any.
 */
static char *
errmsg_save(void)
{
    return error_message == NULL ? NULL : xstrdup(error_message);
}

/*
 * Restore the current error message from a copy which was previously saved
 * by errmsg_save(). The copy is freed.
 */
static void
errmsg_restore(char *saved_msg)
{
    if (saved_msg == NULL)
        error_message = NULL;
    else {
        _rtld_error("%s", saved_msg);
        free(saved_msg);
    }
}

static const char *
basename(const char *name)
{
    const char *p = strrchr(name, '/');
    return p != NULL ? p + 1 : name;
}
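/*
 * Illustrative usage sketch (added commentary, not part of the original
 * source): errmsg_save()/errmsg_restore() bracket nested operations that
 * may overwrite a pending dlerror() message, e.g.
 *
 *	char *saved_msg = errmsg_save();
 *	... nested lookup that may call _rtld_error() ...
 *	errmsg_restore(saved_msg);
 */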
static struct utsname uts;

static char *
origin_subst_one(Obj_Entry *obj, char *real, const char *kw,
    const char *subst, bool may_free)
{
    char *p, *p1, *res, *resp;
    int subst_len, kw_len, subst_count, old_len, new_len;

    kw_len = strlen(kw);

    /*
     * First, count the number of the keyword occurrences, to
     * preallocate the final string.
     */
    for (p = real, subst_count = 0;; p = p1 + kw_len, subst_count++) {
        p1 = strstr(p, kw);
        if (p1 == NULL)
            break;
    }

    /*
     * If the keyword is not found, just return.
     *
     * Return non-substituted string if resolution failed. We
     * cannot do anything more reasonable, the failure mode of the
     * caller is unresolved library anyway.
     */
    if (subst_count == 0 || (obj != NULL && !obj_resolve_origin(obj)))
        return (may_free ? real : xstrdup(real));
    if (obj != NULL)
        subst = obj->origin_path;

    /*
     * There is indeed something to substitute. Calculate the
     * length of the resulting string, and allocate it.
     */
    subst_len = strlen(subst);
    old_len = strlen(real);
    new_len = old_len + (subst_len - kw_len) * subst_count;
    res = xmalloc(new_len + 1);

    /*
     * Now, execute the substitution loop.
     */
    for (p = real, resp = res, *resp = '\0';;) {
        p1 = strstr(p, kw);
        if (p1 != NULL) {
            /* Copy the prefix before keyword. */
            memcpy(resp, p, p1 - p);
            resp += p1 - p;
            /* Keyword replacement. */
            memcpy(resp, subst, subst_len);
            resp += subst_len;
            *resp = '\0';
            p = p1 + kw_len;
        } else
            break;
    }

    /* Copy to the end of string and finish. */
    strcat(resp, p);
    if (may_free)
        free(real);
    return (res);
}

static char *
origin_subst(Obj_Entry *obj, char *real)
{
    char *res1, *res2, *res3, *res4;

    if (obj == NULL || !trust)
        return (xstrdup(real));
    if (uts.sysname[0] == '\0') {
        if (uname(&uts) != 0) {
            _rtld_error("utsname failed: %d", errno);
            return (NULL);
        }
    }
    res1 = origin_subst_one(obj, real, "$ORIGIN", NULL, false);
    res2 = origin_subst_one(NULL, res1, "$OSNAME", uts.sysname, true);
    res3 = origin_subst_one(NULL, res2, "$OSREL", uts.release, true);
    res4 = origin_subst_one(NULL, res3, "$PLATFORM", uts.machine, true);
    return (res4);
}

void
rtld_die(void)
{
    const char *msg = dlerror();

    if (msg == NULL)
        msg = "Fatal error";
    rtld_fdputstr(STDERR_FILENO, msg);
    rtld_fdputchar(STDERR_FILENO, '\n');
    _exit(1);
}

/*
 * Process a shared object's DYNAMIC section, and save the important
 * information in its Obj_Entry structure.
 */
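/*
 * Added commentary (not in the original source): digestion is split into
 * two passes. digest_dynamic1() records the raw dynamic entries and
 * defers DT_RPATH/DT_RUNPATH/DT_SONAME because they index the string
 * table, which may not be known yet; digest_dynamic2() resolves them once
 * obj->strtab is valid.
 */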
static void
digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
    const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath)
{
    const Elf_Dyn *dynp;
    Needed_Entry **needed_tail = &obj->needed;
    Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
    Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
    const Elf_Hashelt *hashtab;
    const Elf32_Word *hashval;
    Elf32_Word bkt, nmaskwords;
    int bloom_size32;
    int plttype = DT_REL;

    *dyn_rpath = NULL;
    *dyn_soname = NULL;
    *dyn_runpath = NULL;

    obj->bind_now = false;
    for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
        switch (dynp->d_tag) {

        case DT_REL:
            obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_RELSZ:
            obj->relsize = dynp->d_un.d_val;
            break;

        case DT_RELENT:
            assert(dynp->d_un.d_val == sizeof(Elf_Rel));
            break;

        case DT_JMPREL:
            obj->pltrel = (const Elf_Rel *)
                (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_PLTRELSZ:
            obj->pltrelsize = dynp->d_un.d_val;
            break;

        case DT_RELA:
            obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_RELASZ:
            obj->relasize = dynp->d_un.d_val;
            break;

        case DT_RELAENT:
            assert(dynp->d_un.d_val == sizeof(Elf_Rela));
            break;

        case DT_PLTREL:
            plttype = dynp->d_un.d_val;
            assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
            break;

        case DT_SYMTAB:
            obj->symtab = (const Elf_Sym *)
                (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_SYMENT:
            assert(dynp->d_un.d_val == sizeof(Elf_Sym));
            break;

        case DT_STRTAB:
            obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_STRSZ:
            obj->strsize = dynp->d_un.d_val;
            break;

        case DT_VERNEED:
            obj->verneed = (const Elf_Verneed *) (obj->relocbase +
                dynp->d_un.d_val);
            break;

        case DT_VERNEEDNUM:
            obj->verneednum = dynp->d_un.d_val;
            break;

        case DT_VERDEF:
            obj->verdef = (const Elf_Verdef *) (obj->relocbase +
                dynp->d_un.d_val);
            break;

        case DT_VERDEFNUM:
            obj->verdefnum = dynp->d_un.d_val;
            break;

        case DT_VERSYM:
            obj->versyms = (const Elf_Versym *)(obj->relocbase +
                dynp->d_un.d_val);
            break;

        case DT_HASH:
            {
            hashtab = (const Elf_Hashelt *)(obj->relocbase +
                dynp->d_un.d_ptr);
            obj->nbuckets = hashtab[0];
            obj->nchains = hashtab[1];
            obj->buckets = hashtab + 2;
            obj->chains = obj->buckets + obj->nbuckets;
            obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 &&
                obj->buckets != NULL;
            }
            break;

        case DT_GNU_HASH:
            {
            hashtab = (const Elf_Hashelt *)(obj->relocbase +
                dynp->d_un.d_ptr);
            obj->nbuckets_gnu = hashtab[0];
            obj->symndx_gnu = hashtab[1];
            nmaskwords = hashtab[2];
            bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords;
            obj->maskwords_bm_gnu = nmaskwords - 1;
            obj->shift2_gnu = hashtab[3];
            obj->bloom_gnu = (Elf_Addr *) (hashtab + 4);
            obj->buckets_gnu = hashtab + 4 + bloom_size32;
            obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu -
                obj->symndx_gnu;
            /* Number of bitmask words is required to be power of 2 */
            obj->valid_hash_gnu = powerof2(nmaskwords) &&
                obj->nbuckets_gnu > 0 && obj->buckets_gnu != NULL;
            }
            break;

        case DT_NEEDED:
            if (!obj->rtld) {
                Needed_Entry *nep = NEW(Needed_Entry);
                nep->name = dynp->d_un.d_val;
                nep->obj = NULL;
                nep->next = NULL;

                *needed_tail = nep;
                needed_tail = &nep->next;
            }
            break;

        case DT_FILTER:
            if (!obj->rtld) {
                Needed_Entry *nep = NEW(Needed_Entry);
                nep->name = dynp->d_un.d_val;
                nep->obj = NULL;
                nep->next = NULL;

                *needed_filtees_tail = nep;
                needed_filtees_tail = &nep->next;
            }
            break;

        case DT_AUXILIARY:
            if (!obj->rtld) {
                Needed_Entry *nep = NEW(Needed_Entry);
                nep->name = dynp->d_un.d_val;
                nep->obj = NULL;
                nep->next = NULL;

                *needed_aux_filtees_tail = nep;
                needed_aux_filtees_tail = &nep->next;
            }
            break;

        case DT_PLTGOT:
            obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_TEXTREL:
            obj->textrel = true;
            break;

        case DT_SYMBOLIC:
            obj->symbolic = true;
            break;

        case DT_RPATH:
            /*
             * We have to wait until later to process this, because we
             * might not have gotten the address of the string table yet.
             */
            *dyn_rpath = dynp;
            break;

        case DT_SONAME:
            *dyn_soname = dynp;
            break;

        case DT_RUNPATH:
            *dyn_runpath = dynp;
            break;

        case DT_INIT:
            obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_PREINIT_ARRAY:
            obj->preinit_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_PREINIT_ARRAYSZ:
            obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
            break;

        case DT_INIT_ARRAY:
            obj->init_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_INIT_ARRAYSZ:
            obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
            break;

        case DT_FINI:
            obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_FINI_ARRAY:
            obj->fini_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
            break;

        case DT_FINI_ARRAYSZ:
            obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
            break;

        /*
         * Don't process DT_DEBUG on MIPS as the dynamic section
         * is mapped read-only. DT_MIPS_RLD_MAP is used instead.
         */
#ifndef __mips__
        case DT_DEBUG:
            if (!early)
                dbg("Filling in DT_DEBUG entry");
            ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
            break;
#endif

        case DT_FLAGS:
            if (dynp->d_un.d_val & DF_ORIGIN)
                obj->z_origin = true;
            if (dynp->d_un.d_val & DF_SYMBOLIC)
                obj->symbolic = true;
            if (dynp->d_un.d_val & DF_TEXTREL)
                obj->textrel = true;
            if (dynp->d_un.d_val & DF_BIND_NOW)
                obj->bind_now = true;
            /*if (dynp->d_un.d_val & DF_STATIC_TLS)
                ;*/
            break;
#ifdef __mips__
        case DT_MIPS_LOCAL_GOTNO:
            obj->local_gotno = dynp->d_un.d_val;
            break;

        case DT_MIPS_SYMTABNO:
            obj->symtabno = dynp->d_un.d_val;
            break;

        case DT_MIPS_GOTSYM:
            obj->gotsym = dynp->d_un.d_val;
            break;

        case DT_MIPS_RLD_MAP:
            *((Elf_Addr *)(dynp->d_un.d_ptr)) = (Elf_Addr) &r_debug;
            break;
#endif

#ifdef __powerpc64__
        case DT_PPC64_GLINK:
            obj->glink = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
            break;
#endif

        case DT_FLAGS_1:
            if (dynp->d_un.d_val & DF_1_NOOPEN)
                obj->z_noopen = true;
            if (dynp->d_un.d_val & DF_1_ORIGIN)
                obj->z_origin = true;
            if (dynp->d_un.d_val & DF_1_GLOBAL)
                obj->z_global = true;
            if (dynp->d_un.d_val & DF_1_BIND_NOW)
                obj->bind_now = true;
            if (dynp->d_un.d_val & DF_1_NODELETE)
                obj->z_nodelete = true;
            if (dynp->d_un.d_val & DF_1_LOADFLTR)
                obj->z_loadfltr = true;
            if (dynp->d_un.d_val & DF_1_INTERPOSE)
                obj->z_interpose = true;
            if (dynp->d_un.d_val & DF_1_NODEFLIB)
                obj->z_nodeflib = true;
            break;

        default:
            if (!early) {
                dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
                    (long)dynp->d_tag);
            }
            break;
        }
    }

    obj->traced = false;

    if (plttype == DT_RELA) {
        obj->pltrela = (const Elf_Rela *) obj->pltrel;
        obj->pltrel = NULL;
        obj->pltrelasize = obj->pltrelsize;
        obj->pltrelsize = 0;
    }

    /* Determine size of dynsym table (equal to nchains of sysv hash) */
    if (obj->valid_hash_sysv)
        obj->dynsymcount = obj->nchains;
    else if (obj->valid_hash_gnu) {
        obj->dynsymcount = 0;
        for (bkt = 0; bkt < obj->nbuckets_gnu; bkt++) {
            if (obj->buckets_gnu[bkt] == 0)
                continue;
            hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]];
            do
                obj->dynsymcount++;
            while ((*hashval++ & 1u) == 0);
        }
        obj->dynsymcount += obj->symndx_gnu;
    }
}

static bool
obj_resolve_origin(Obj_Entry *obj)
{

    if (obj->origin_path != NULL)
        return (true);
    obj->origin_path = xmalloc(PATH_MAX);
    return (rtld_dirname_abs(obj->path, obj->origin_path) != -1);
}

static void
digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
    const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath)
{

    if (obj->z_origin && !obj_resolve_origin(obj))
        rtld_die();

    if (dyn_runpath != NULL) {
        obj->runpath = (char *)obj->strtab + dyn_runpath->d_un.d_val;
        obj->runpath = origin_subst(obj, obj->runpath);
    } else if (dyn_rpath != NULL) {
        obj->rpath = (char *)obj->strtab + dyn_rpath->d_un.d_val;
        obj->rpath = origin_subst(obj, obj->rpath);
    }
    if (dyn_soname != NULL)
        object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
}

static void
digest_dynamic(Obj_Entry *obj, int early)
{
    const Elf_Dyn *dyn_rpath;
    const Elf_Dyn *dyn_soname;
    const Elf_Dyn *dyn_runpath;

    digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath);
    digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath);
}

/*
 * Process a shared object's program header. This is used only for the
 * main program, when the kernel has already loaded the main program
 * into memory before calling the dynamic linker. It creates and
 * returns an Obj_Entry structure.
 */
static Obj_Entry *
digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
{
    Obj_Entry *obj;
    const Elf_Phdr *phlimit = phdr + phnum;
    const Elf_Phdr *ph;
    Elf_Addr note_start, note_end;
    int nsegs = 0;

    obj = obj_new();
    for (ph = phdr; ph < phlimit; ph++) {
        if (ph->p_type != PT_PHDR)
            continue;

        obj->phdr = phdr;
        obj->phsize = ph->p_memsz;
        obj->relocbase = (caddr_t)phdr - ph->p_vaddr;
        break;
    }

    obj->stack_flags = PF_X | PF_R | PF_W;

    for (ph = phdr; ph < phlimit; ph++) {
        switch (ph->p_type) {

        case PT_INTERP:
            obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
            break;

        case PT_LOAD:
            if (nsegs == 0) {   /* First load segment */
                obj->vaddrbase = trunc_page(ph->p_vaddr);
                obj->mapbase = obj->vaddrbase + obj->relocbase;
                obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
                    obj->vaddrbase;
            } else {            /* Last load segment */
                obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
                    obj->vaddrbase;
            }
            nsegs++;
            break;

        case PT_DYNAMIC:
            obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
            break;

        case PT_TLS:
            obj->tlsindex = 1;
            obj->tlssize = ph->p_memsz;
            obj->tlsalign = ph->p_align;
            obj->tlsinitsize = ph->p_filesz;
            obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
            break;

        case PT_GNU_STACK:
            obj->stack_flags = ph->p_flags;
            break;

        case PT_GNU_RELRO:
            obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
            obj->relro_size = round_page(ph->p_memsz);
            break;

        case PT_NOTE:
            note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
            note_end = note_start + ph->p_filesz;
            digest_notes(obj, note_start, note_end);
            break;
        }
    }
    if (nsegs < 1) {
        _rtld_error("%s: too few PT_LOAD segments", path);
        return NULL;
    }

    obj->entry = entry;
    return obj;
}

void
digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end)
{
    const Elf_Note *note;
    const char *note_name;
    uintptr_t p;

    for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end;
        note = (const Elf_Note *)((const char *)(note + 1) +
        roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
        roundup2(note->n_descsz, sizeof(Elf32_Addr)))) {
        if (note->n_namesz != sizeof(NOTE_FREEBSD_VENDOR) ||
            note->n_descsz != sizeof(int32_t))
            continue;
        if (note->n_type != NT_FREEBSD_ABI_TAG &&
            note->n_type != NT_FREEBSD_NOINIT_TAG)
            continue;
        note_name = (const char *)(note + 1);
        if (strncmp(NOTE_FREEBSD_VENDOR, note_name,
            sizeof(NOTE_FREEBSD_VENDOR)) != 0)
            continue;
        switch (note->n_type) {
        case NT_FREEBSD_ABI_TAG:
            /* FreeBSD osrel note */
            p = (uintptr_t)(note + 1);
            p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
            obj->osrel = *(const int32_t *)(p);
            dbg("note osrel %d", obj->osrel);
            break;
        case NT_FREEBSD_NOINIT_TAG:
            /* FreeBSD 'crt does not call init' note */
            obj->crt_no_init = true;
            dbg("note crt_no_init");
            break;
        }
    }
}

static Obj_Entry *
dlcheck(void *handle)
{
    Obj_Entry *obj;

    TAILQ_FOREACH(obj, &obj_list, next) {
        if (obj == (Obj_Entry *) handle)
            break;
    }

    if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
        _rtld_error("Invalid shared object handle %p", handle);
        return NULL;
    }
    return obj;
}

/*
 * If the given object is already in the donelist, return true. Otherwise
 * add the object to the list and return false.
 */
static bool
donelist_check(DoneList *dlp, const Obj_Entry *obj)
{
    unsigned int i;

    for (i = 0; i < dlp->num_used; i++)
        if (dlp->objs[i] == obj)
            return true;
    /*
     * Our donelist allocation should always be sufficient. But if
     * our threads locking isn't working properly, more shared objects
     * could have been loaded since we allocated the list. That should
     * never happen, but we'll handle it properly just in case it does.
     */
    if (dlp->num_used < dlp->num_alloc)
        dlp->objs[dlp->num_used++] = obj;
    return false;
}

/*
 * Hash function for symbol table lookup. Don't even think about changing
 * this. It is specified by the System V ABI.
 */
unsigned long
elf_hash(const char *name)
{
    const unsigned char *p = (const unsigned char *) name;
    unsigned long h = 0;
    unsigned long g;

    while (*p != '\0') {
        h = (h << 4) + *p++;
        if ((g = h & 0xf0000000) != 0)
            h ^= g >> 24;
        h &= ~g;
    }
    return h;
}

/*
 * The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits
 * unsigned in case it's implemented with a wider type.
 */
static uint32_t
gnu_hash(const char *s)
{
    uint32_t h;
    unsigned char c;

    h = 5381;
    for (c = *s; c != '\0'; c = *++s)
        h = h * 33 + c;
    return (h & 0xffffffff);
}


/*
 * Find the library with the given name, and return its full pathname.
 * The returned string is dynamically allocated. Generates an error
 * message and returns NULL if the library cannot be found.
 *
 * If the second argument is non-NULL, then it refers to an already-
 * loaded shared object, whose library search path will be searched.
 *
 * If a library is successfully located via LD_LIBRARY_PATH_FDS, its
 * descriptor (which is close-on-exec) will be passed out via the third
 * argument.
 *
 * The search order is:
 *   DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1)
 *   DT_RPATH of the main object if DSO without defined DT_RUNPATH (1)
 *   LD_LIBRARY_PATH
 *   DT_RUNPATH in the referencing file
 *   ldconfig hints (if -z nodefaultlib, filter out default library directories
 *       from list)
 *   /lib:/usr/lib _unless_ the referencing file is linked with -z nodefaultlib
 *
 * (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined.
 */
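/*
 * Illustrative example (added commentary, not part of the original
 * source): for a hypothetical dependency "libfoo.so.1" requested by an
 * object whose DT_RUNPATH is "/opt/foo/lib", with LD_LIBRARY_PATH set to
 * "/tmp/lib", the probe order would be roughly:
 *
 *	/tmp/lib/libfoo.so.1		(LD_LIBRARY_PATH)
 *	/opt/foo/lib/libfoo.so.1	(DT_RUNPATH of the referencing object)
 *	ldconfig hints directories
 *	/lib:/usr/lib			(standard path, unless -z nodefaultlib)
 */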
static char *
find_library(const char *xname, const Obj_Entry *refobj, int *fdp)
{
    char *pathname;
    char *name;
    bool nodeflib, objgiven;

    objgiven = refobj != NULL;
    if (strchr(xname, '/') != NULL) {   /* Hard coded pathname */
        if (xname[0] != '/' && !trust) {
            _rtld_error("Absolute pathname required for shared object \"%s\"",
                xname);
            return NULL;
        }
        return (origin_subst(__DECONST(Obj_Entry *, refobj),
            __DECONST(char *, xname)));
    }

    if (libmap_disable || !objgiven ||
        (name = lm_find(refobj->path, xname)) == NULL)
        name = (char *)xname;

    dbg(" Searching for \"%s\"", name);

    /*
     * If refobj->rpath != NULL, then refobj->runpath is NULL. Fall
     * back to pre-conforming behaviour if user requested so with
     * LD_LIBRARY_PATH_RPATH environment variable and ignore -z
     * nodeflib.
     */
    if (objgiven && refobj->rpath != NULL && ld_library_path_rpath) {
        if ((pathname = search_library_path(name, ld_library_path)) != NULL ||
            (refobj != NULL &&
            (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
            (pathname = search_library_pathfds(name, ld_library_dirs, fdp)) != NULL ||
            (pathname = search_library_path(name, gethints(false))) != NULL ||
            (pathname = search_library_path(name, ld_standard_library_path)) != NULL)
            return (pathname);
    } else {
        nodeflib = objgiven ? refobj->z_nodeflib : false;
        if ((objgiven &&
            (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
            (objgiven && refobj->runpath == NULL && refobj != obj_main &&
            (pathname = search_library_path(name, obj_main->rpath)) != NULL) ||
            (pathname = search_library_path(name, ld_library_path)) != NULL ||
            (objgiven &&
            (pathname = search_library_path(name, refobj->runpath)) != NULL) ||
            (pathname = search_library_pathfds(name, ld_library_dirs, fdp)) != NULL ||
            (pathname = search_library_path(name, gethints(nodeflib))) != NULL ||
            (objgiven && !nodeflib &&
            (pathname = search_library_path(name, ld_standard_library_path)) != NULL))
            return (pathname);
    }

    if (objgiven && refobj->path != NULL) {
        _rtld_error("Shared object \"%s\" not found, required by \"%s\"",
            name, basename(refobj->path));
    } else {
        _rtld_error("Shared object \"%s\" not found", name);
    }
    return NULL;
}

/*
 * Given a symbol number in a referencing object, find the corresponding
 * definition of the symbol. Returns a pointer to the symbol, or NULL if
 * no definition was found. Returns a pointer to the Obj_Entry of the
 * defining object via the reference parameter DEFOBJ_OUT.
 */
const Elf_Sym *
find_symdef(unsigned long symnum, const Obj_Entry *refobj,
    const Obj_Entry **defobj_out, int flags, SymCache *cache,
    RtldLockState *lockstate)
{
    const Elf_Sym *ref;
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    SymLook req;
    const char *name;
    int res;

    /*
     * If we have already found this symbol, get the information from
     * the cache.
     */
    if (symnum >= refobj->dynsymcount)
        return NULL;    /* Bad object */
    if (cache != NULL && cache[symnum].sym != NULL) {
        *defobj_out = cache[symnum].obj;
        return cache[symnum].sym;
    }

    ref = refobj->symtab + symnum;
    name = refobj->strtab + ref->st_name;
    def = NULL;
    defobj = NULL;

    /*
     * We don't have to do a full scale lookup if the symbol is local.
     * We know it will bind to the instance in this load module, to
     * which we already have a pointer (i.e., ref). By not doing a lookup,
     * we not only improve performance, but it also avoids unresolvable
     * symbols when local symbols are not in the hash table. This has
     * been seen with the ia64 toolchain.
     */
    if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
        if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
            _rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
                symnum);
        }
        symlook_init(&req, name);
        req.flags = flags;
        req.ventry = fetch_ventry(refobj, symnum);
        req.lockstate = lockstate;
        res = symlook_default(&req, refobj);
        if (res == 0) {
            def = req.sym_out;
            defobj = req.defobj_out;
        }
    } else {
        def = ref;
        defobj = refobj;
    }

    /*
     * If we found no definition and the reference is weak, treat the
     * symbol as having the value zero.
     */
    if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
        def = &sym_zero;
        defobj = obj_main;
    }

    if (def != NULL) {
        *defobj_out = defobj;
        /* Record the information in the cache to avoid subsequent lookups. */
        if (cache != NULL) {
            cache[symnum].sym = def;
            cache[symnum].obj = defobj;
        }
    } else {
        if (refobj != &obj_rtld)
            _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name);
    }
    return def;
}

/*
 * Return the search path from the ldconfig hints file, reading it if
 * necessary. If nostdlib is true, then the default search paths are
 * not added to the result.
 *
 * Returns NULL if there are problems with the hints file,
 * or if the search path there is empty.
 */
static const char *
gethints(bool nostdlib)
{
    static char *hints, *filtered_path;
    static struct elfhints_hdr hdr;
    struct fill_search_info_args sargs, hargs;
    struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo;
    struct dl_serpath *SLPpath, *hintpath;
    char *p;
    struct stat hint_stat;
    unsigned int SLPndx, hintndx, fndx, fcount;
    int fd;
    size_t flen;
    uint32_t dl;
    bool skip;

    /* First call, read the hints file */
    if (hints == NULL) {
        /* Keep from trying again in case the hints file is bad. */
        hints = "";

        if ((fd = open(ld_elf_hints_path, O_RDONLY | O_CLOEXEC)) == -1)
            return (NULL);

        /*
         * The check of the hdr.dirlistlen value against the type limit
         * is intended to pacify static analyzers. Further paranoia
         * leads to checks that the dirlist is fully contained in the
         * file range.
         */
        if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
            hdr.magic != ELFHINTS_MAGIC ||
            hdr.version != 1 || hdr.dirlistlen > UINT_MAX / 2 ||
            fstat(fd, &hint_stat) == -1) {
cleanup1:
            close(fd);
            hdr.dirlistlen = 0;
            return (NULL);
        }
        dl = hdr.strtab;
        if (dl + hdr.dirlist < dl)
            goto cleanup1;
        dl += hdr.dirlist;
        if (dl + hdr.dirlistlen < dl)
            goto cleanup1;
        dl += hdr.dirlistlen;
        if (dl > hint_stat.st_size)
            goto cleanup1;
        p = xmalloc(hdr.dirlistlen + 1);

        if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 ||
            read(fd, p, hdr.dirlistlen + 1) !=
            (ssize_t)hdr.dirlistlen + 1 || p[hdr.dirlistlen] != '\0') {
            free(p);
            goto cleanup1;
        }
        hints = p;
        close(fd);
    }

    /*
     * If the caller agreed to receive a list which includes the default
     * paths, we are done. Otherwise, if we have not yet calculated the
     * filtered result, do it now.
     */
    if (!nostdlib)
        return (hints[0] != '\0' ? hints : NULL);
    if (filtered_path != NULL)
        goto filt_ret;

    /*
     * Obtain the list of all configured search paths, and the
     * list of the default paths.
     *
     * First estimate the size of the results.
     */
    smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
    smeta.dls_cnt = 0;
    hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
    hmeta.dls_cnt = 0;

    sargs.request = RTLD_DI_SERINFOSIZE;
    sargs.serinfo = &smeta;
    hargs.request = RTLD_DI_SERINFOSIZE;
    hargs.serinfo = &hmeta;

    path_enumerate(ld_standard_library_path, fill_search_info, &sargs);
    path_enumerate(hints, fill_search_info, &hargs);

    SLPinfo = xmalloc(smeta.dls_size);
    hintinfo = xmalloc(hmeta.dls_size);

    /*
     * Next fetch both sets of paths.
     */
    sargs.request = RTLD_DI_SERINFO;
    sargs.serinfo = SLPinfo;
    sargs.serpath = &SLPinfo->dls_serpath[0];
    sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt];

    hargs.request = RTLD_DI_SERINFO;
    hargs.serinfo = hintinfo;
    hargs.serpath = &hintinfo->dls_serpath[0];
    hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt];

    path_enumerate(ld_standard_library_path, fill_search_info, &sargs);
    path_enumerate(hints, fill_search_info, &hargs);

    /*
     * Now calculate the difference between the two sets, by excluding
     * standard paths from the full set.
     */
    fndx = 0;
    fcount = 0;
    filtered_path = xmalloc(hdr.dirlistlen + 1);
    hintpath = &hintinfo->dls_serpath[0];
    for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++, hintpath++) {
        skip = false;
        SLPpath = &SLPinfo->dls_serpath[0];
        /*
         * Check each standard path against the current one.
         */
        for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++, SLPpath++) {
            /* matched, skip the path */
            if (!strcmp(hintpath->dls_name, SLPpath->dls_name)) {
                skip = true;
                break;
            }
        }
        if (skip)
            continue;
        /*
         * Not matched against any standard path, add the path
         * to the result. Separate consecutive paths with ':'.
         */
1788 */ 1789 if (fcount > 0) { 1790 filtered_path[fndx] = ':'; 1791 fndx++; 1792 } 1793 fcount++; 1794 flen = strlen(hintpath->dls_name); 1795 strncpy((filtered_path + fndx), hintpath->dls_name, flen); 1796 fndx += flen; 1797 } 1798 filtered_path[fndx] = '\0'; 1799 1800 free(SLPinfo); 1801 free(hintinfo); 1802 1803 filt_ret: 1804 return (filtered_path[0] != '\0' ? filtered_path : NULL); 1805 } 1806 1807 static void 1808 init_dag(Obj_Entry *root) 1809 { 1810 const Needed_Entry *needed; 1811 const Objlist_Entry *elm; 1812 DoneList donelist; 1813 1814 if (root->dag_inited) 1815 return; 1816 donelist_init(&donelist); 1817 1818 /* Root object belongs to own DAG. */ 1819 objlist_push_tail(&root->dldags, root); 1820 objlist_push_tail(&root->dagmembers, root); 1821 donelist_check(&donelist, root); 1822 1823 /* 1824 * Add dependencies of root object to DAG in breadth order 1825 * by exploiting the fact that each new object get added 1826 * to the tail of the dagmembers list. 1827 */ 1828 STAILQ_FOREACH(elm, &root->dagmembers, link) { 1829 for (needed = elm->obj->needed; needed != NULL; needed = needed->next) { 1830 if (needed->obj == NULL || donelist_check(&donelist, needed->obj)) 1831 continue; 1832 objlist_push_tail(&needed->obj->dldags, root); 1833 objlist_push_tail(&root->dagmembers, needed->obj); 1834 } 1835 } 1836 root->dag_inited = true; 1837 } 1838 1839 static void 1840 init_marker(Obj_Entry *marker) 1841 { 1842 1843 bzero(marker, sizeof(*marker)); 1844 marker->marker = true; 1845 } 1846 1847 Obj_Entry * 1848 globallist_curr(const Obj_Entry *obj) 1849 { 1850 1851 for (;;) { 1852 if (obj == NULL) 1853 return (NULL); 1854 if (!obj->marker) 1855 return (__DECONST(Obj_Entry *, obj)); 1856 obj = TAILQ_PREV(obj, obj_entry_q, next); 1857 } 1858 } 1859 1860 Obj_Entry * 1861 globallist_next(const Obj_Entry *obj) 1862 { 1863 1864 for (;;) { 1865 obj = TAILQ_NEXT(obj, next); 1866 if (obj == NULL) 1867 return (NULL); 1868 if (!obj->marker) 1869 return (__DECONST(Obj_Entry *, obj)); 1870 } 1871 } 1872 1873 /* Prevent the object from being unmapped while the bind lock is dropped. */ 1874 static void 1875 hold_object(Obj_Entry *obj) 1876 { 1877 1878 obj->holdcount++; 1879 } 1880 1881 static void 1882 unhold_object(Obj_Entry *obj) 1883 { 1884 1885 assert(obj->holdcount > 0); 1886 if (--obj->holdcount == 0 && obj->unholdfree) 1887 release_object(obj); 1888 } 1889 1890 static void 1891 process_z(Obj_Entry *root) 1892 { 1893 const Objlist_Entry *elm; 1894 Obj_Entry *obj; 1895 1896 /* 1897 * Walk over object DAG and process every dependent object 1898 * that is marked as DF_1_NODELETE or DF_1_GLOBAL. They need 1899 * to grow their own DAG. 1900 * 1901 * For DF_1_GLOBAL, DAG is required for symbol lookups in 1902 * symlook_global() to work. 1903 * 1904 * For DF_1_NODELETE, the DAG should have its reference upped. 1905 */ 1906 STAILQ_FOREACH(elm, &root->dagmembers, link) { 1907 obj = elm->obj; 1908 if (obj == NULL) 1909 continue; 1910 if (obj->z_nodelete && !obj->ref_nodel) { 1911 dbg("obj %s -z nodelete", obj->path); 1912 init_dag(obj); 1913 ref_dag(obj); 1914 obj->ref_nodel = true; 1915 } 1916 if (obj->z_global && objlist_find(&list_global, obj) == NULL) { 1917 dbg("obj %s -z global", obj->path); 1918 objlist_push_tail(&list_global, obj); 1919 init_dag(obj); 1920 } 1921 } 1922 } 1923 /* 1924 * Initialize the dynamic linker. The argument is the address at which 1925 * the dynamic linker has been mapped into memory. The primary task of 1926 * this function is to relocate the dynamic linker. 
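* Until that self-relocation is complete, the code below must not rely on global variables or string constants, which is why a temporary Obj_Entry is built on the stack first.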
1927 */ 1928 static void 1929 init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info) 1930 { 1931 Obj_Entry objtmp; /* Temporary rtld object */ 1932 const Elf_Ehdr *ehdr; 1933 const Elf_Dyn *dyn_rpath; 1934 const Elf_Dyn *dyn_soname; 1935 const Elf_Dyn *dyn_runpath; 1936 1937 #ifdef RTLD_INIT_PAGESIZES_EARLY 1938 /* The page size is required by the dynamic memory allocator. */ 1939 init_pagesizes(aux_info); 1940 #endif 1941 1942 /* 1943 * Conjure up an Obj_Entry structure for the dynamic linker. 1944 * 1945 * The "path" member can't be initialized yet because string constants 1946 * cannot yet be accessed. Below we will set it correctly. 1947 */ 1948 memset(&objtmp, 0, sizeof(objtmp)); 1949 objtmp.path = NULL; 1950 objtmp.rtld = true; 1951 objtmp.mapbase = mapbase; 1952 #ifdef PIC 1953 objtmp.relocbase = mapbase; 1954 #endif 1955 1956 objtmp.dynamic = rtld_dynamic(&objtmp); 1957 digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath); 1958 assert(objtmp.needed == NULL); 1959 #if !defined(__mips__) 1960 /* MIPS has a bogus DT_TEXTREL. */ 1961 assert(!objtmp.textrel); 1962 #endif 1963 /* 1964 * Temporarily put the dynamic linker entry into the object list, so 1965 * that symbols can be found. 1966 */ 1967 relocate_objects(&objtmp, true, &objtmp, 0, NULL); 1968 1969 ehdr = (Elf_Ehdr *)mapbase; 1970 objtmp.phdr = (Elf_Phdr *)((char *)mapbase + ehdr->e_phoff); 1971 objtmp.phsize = ehdr->e_phnum * sizeof(objtmp.phdr[0]); 1972 1973 /* Initialize the object list. */ 1974 TAILQ_INIT(&obj_list); 1975 1976 /* Now that non-local variables can be accesses, copy out obj_rtld. */ 1977 memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld)); 1978 1979 #ifndef RTLD_INIT_PAGESIZES_EARLY 1980 /* The page size is required by the dynamic memory allocator. */ 1981 init_pagesizes(aux_info); 1982 #endif 1983 1984 if (aux_info[AT_OSRELDATE] != NULL) 1985 osreldate = aux_info[AT_OSRELDATE]->a_un.a_val; 1986 1987 digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath); 1988 1989 /* Replace the path with a dynamically allocated copy. */ 1990 obj_rtld.path = xstrdup(ld_path_rtld); 1991 1992 r_debug.r_brk = r_debug_state; 1993 r_debug.r_state = RT_CONSISTENT; 1994 } 1995 1996 /* 1997 * Retrieve the array of supported page sizes. The kernel provides the page 1998 * sizes in increasing order. 1999 */ 2000 static void 2001 init_pagesizes(Elf_Auxinfo **aux_info) 2002 { 2003 static size_t psa[MAXPAGESIZES]; 2004 int mib[2]; 2005 size_t len, size; 2006 2007 if (aux_info[AT_PAGESIZES] != NULL && aux_info[AT_PAGESIZESLEN] != 2008 NULL) { 2009 size = aux_info[AT_PAGESIZESLEN]->a_un.a_val; 2010 pagesizes = aux_info[AT_PAGESIZES]->a_un.a_ptr; 2011 } else { 2012 len = 2; 2013 if (sysctlnametomib("hw.pagesizes", mib, &len) == 0) 2014 size = sizeof(psa); 2015 else { 2016 /* As a fallback, retrieve the base page size. */ 2017 size = sizeof(psa[0]); 2018 if (aux_info[AT_PAGESZ] != NULL) { 2019 psa[0] = aux_info[AT_PAGESZ]->a_un.a_val; 2020 goto psa_filled; 2021 } else { 2022 mib[0] = CTL_HW; 2023 mib[1] = HW_PAGESIZE; 2024 len = 2; 2025 } 2026 } 2027 if (sysctl(mib, len, psa, &size, NULL, 0) == -1) { 2028 _rtld_error("sysctl for hw.pagesize(s) failed"); 2029 rtld_die(); 2030 } 2031 psa_filled: 2032 pagesizes = psa; 2033 } 2034 npagesizes = size / sizeof(pagesizes[0]); 2035 /* Discard any invalid entries at the end of the array. 
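(An entry of zero denotes an unused slot; only trailing slots are trimmed.)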
*/ 2036 while (npagesizes > 0 && pagesizes[npagesizes - 1] == 0) 2037 npagesizes--; 2038 } 2039 2040 /* 2041 * Add the init functions from a needed object list (and its recursive 2042 * needed objects) to "list". This is not used directly; it is a helper 2043 * function for initlist_add_objects(). The write lock must be held 2044 * when this function is called. 2045 */ 2046 static void 2047 initlist_add_neededs(Needed_Entry *needed, Objlist *list) 2048 { 2049 /* Recursively process the successor needed objects. */ 2050 if (needed->next != NULL) 2051 initlist_add_neededs(needed->next, list); 2052 2053 /* Process the current needed object. */ 2054 if (needed->obj != NULL) 2055 initlist_add_objects(needed->obj, needed->obj, list); 2056 } 2057 2058 /* 2059 * Scan all of the DAGs rooted in the range of objects from "obj" to 2060 * "tail" and add their init functions to "list". This recurses over 2061 * the DAGs and ensure the proper init ordering such that each object's 2062 * needed libraries are initialized before the object itself. At the 2063 * same time, this function adds the objects to the global finalization 2064 * list "list_fini" in the opposite order. The write lock must be 2065 * held when this function is called. 2066 */ 2067 static void 2068 initlist_add_objects(Obj_Entry *obj, Obj_Entry *tail, Objlist *list) 2069 { 2070 Obj_Entry *nobj; 2071 2072 if (obj->init_scanned || obj->init_done) 2073 return; 2074 obj->init_scanned = true; 2075 2076 /* Recursively process the successor objects. */ 2077 nobj = globallist_next(obj); 2078 if (nobj != NULL && obj != tail) 2079 initlist_add_objects(nobj, tail, list); 2080 2081 /* Recursively process the needed objects. */ 2082 if (obj->needed != NULL) 2083 initlist_add_neededs(obj->needed, list); 2084 if (obj->needed_filtees != NULL) 2085 initlist_add_neededs(obj->needed_filtees, list); 2086 if (obj->needed_aux_filtees != NULL) 2087 initlist_add_neededs(obj->needed_aux_filtees, list); 2088 2089 /* Add the object to the init list. */ 2090 if (obj->preinit_array != (Elf_Addr)NULL || obj->init != (Elf_Addr)NULL || 2091 obj->init_array != (Elf_Addr)NULL) 2092 objlist_push_tail(list, obj); 2093 2094 /* Add the object to the global fini list in the reverse order. */ 2095 if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL) 2096 && !obj->on_fini_list) { 2097 objlist_push_head(&list_fini, obj); 2098 obj->on_fini_list = true; 2099 } 2100 } 2101 2102 #ifndef FPTR_TARGET 2103 #define FPTR_TARGET(f) ((Elf_Addr) (f)) 2104 #endif 2105 2106 static void 2107 free_needed_filtees(Needed_Entry *n) 2108 { 2109 Needed_Entry *needed, *needed1; 2110 2111 for (needed = n; needed != NULL; needed = needed->next) { 2112 if (needed->obj != NULL) { 2113 dlclose(needed->obj); 2114 needed->obj = NULL; 2115 } 2116 } 2117 for (needed = n; needed != NULL; needed = needed1) { 2118 needed1 = needed->next; 2119 free(needed); 2120 } 2121 } 2122 2123 static void 2124 unload_filtees(Obj_Entry *obj) 2125 { 2126 2127 free_needed_filtees(obj->needed_filtees); 2128 obj->needed_filtees = NULL; 2129 free_needed_filtees(obj->needed_aux_filtees); 2130 obj->needed_aux_filtees = NULL; 2131 obj->filtees_loaded = false; 2132 } 2133 2134 static void 2135 load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags, 2136 RtldLockState *lockstate) 2137 { 2138 2139 for (; needed != NULL; needed = needed->next) { 2140 needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj, 2141 flags, ((ld_loadfltr || obj->z_loadfltr) ? 
RTLD_NOW : RTLD_LAZY) | 2142 RTLD_LOCAL, lockstate); 2143 } 2144 } 2145 2146 static void 2147 load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate) 2148 { 2149 2150 lock_restart_for_upgrade(lockstate); 2151 if (!obj->filtees_loaded) { 2152 load_filtee1(obj, obj->needed_filtees, flags, lockstate); 2153 load_filtee1(obj, obj->needed_aux_filtees, flags, lockstate); 2154 obj->filtees_loaded = true; 2155 } 2156 } 2157 2158 static int 2159 process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags) 2160 { 2161 Obj_Entry *obj1; 2162 2163 for (; needed != NULL; needed = needed->next) { 2164 obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj, 2165 flags & ~RTLD_LO_NOLOAD); 2166 if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0) 2167 return (-1); 2168 } 2169 return (0); 2170 } 2171 2172 /* 2173 * Given a shared object, traverse its list of needed objects, and load 2174 * each of them. Returns 0 on success. Generates an error message and 2175 * returns -1 on failure. 2176 */ 2177 static int 2178 load_needed_objects(Obj_Entry *first, int flags) 2179 { 2180 Obj_Entry *obj; 2181 2182 for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) { 2183 if (obj->marker) 2184 continue; 2185 if (process_needed(obj, obj->needed, flags) == -1) 2186 return (-1); 2187 } 2188 return (0); 2189 } 2190 2191 static int 2192 load_preload_objects(void) 2193 { 2194 char *p = ld_preload; 2195 Obj_Entry *obj; 2196 static const char delim[] = " \t:;"; 2197 2198 if (p == NULL) 2199 return 0; 2200 2201 p += strspn(p, delim); 2202 while (*p != '\0') { 2203 size_t len = strcspn(p, delim); 2204 char savech; 2205 2206 savech = p[len]; 2207 p[len] = '\0'; 2208 obj = load_object(p, -1, NULL, 0); 2209 if (obj == NULL) 2210 return -1; /* XXX - cleanup */ 2211 obj->z_interpose = true; 2212 p[len] = savech; 2213 p += len; 2214 p += strspn(p, delim); 2215 } 2216 LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL); 2217 return 0; 2218 } 2219 2220 static const char * 2221 printable_path(const char *path) 2222 { 2223 2224 return (path == NULL ? "<unknown>" : path); 2225 } 2226 2227 /* 2228 * Load a shared object into memory, if it is not already loaded. The 2229 * object may be specified by name or by user-supplied file descriptor 2230 * fd_u. In the later case, the fd_u descriptor is not closed, but its 2231 * duplicate is. 2232 * 2233 * Returns a pointer to the Obj_Entry for the object. Returns NULL 2234 * on failure. 2235 */ 2236 static Obj_Entry * 2237 load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags) 2238 { 2239 Obj_Entry *obj; 2240 int fd; 2241 struct stat sb; 2242 char *path; 2243 2244 fd = -1; 2245 if (name != NULL) { 2246 TAILQ_FOREACH(obj, &obj_list, next) { 2247 if (obj->marker || obj->doomed) 2248 continue; 2249 if (object_match_name(obj, name)) 2250 return (obj); 2251 } 2252 2253 path = find_library(name, refobj, &fd); 2254 if (path == NULL) 2255 return (NULL); 2256 } else 2257 path = NULL; 2258 2259 if (fd >= 0) { 2260 /* 2261 * search_library_pathfds() opens a fresh file descriptor for the 2262 * library, so there is no need to dup(). 2263 */ 2264 } else if (fd_u == -1) { 2265 /* 2266 * If we didn't find a match by pathname, or the name is not 2267 * supplied, open the file and check again by device and inode. 2268 * This avoids false mismatches caused by multiple links or ".." 2269 * in pathnames. 2270 * 2271 * To avoid a race, we open the file and use fstat() rather than 2272 * using stat(). 
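* Between an earlier stat() of the pathname and this open() the file could have been replaced, so the device and inode compared below are taken from the descriptor that will actually be mapped.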
2273 */ 2274 if ((fd = open(path, O_RDONLY | O_CLOEXEC | O_VERIFY)) == -1) { 2275 _rtld_error("Cannot open \"%s\"", path); 2276 free(path); 2277 return (NULL); 2278 } 2279 } else { 2280 fd = fcntl(fd_u, F_DUPFD_CLOEXEC, 0); 2281 if (fd == -1) { 2282 _rtld_error("Cannot dup fd"); 2283 free(path); 2284 return (NULL); 2285 } 2286 } 2287 if (fstat(fd, &sb) == -1) { 2288 _rtld_error("Cannot fstat \"%s\"", printable_path(path)); 2289 close(fd); 2290 free(path); 2291 return NULL; 2292 } 2293 TAILQ_FOREACH(obj, &obj_list, next) { 2294 if (obj->marker || obj->doomed) 2295 continue; 2296 if (obj->ino == sb.st_ino && obj->dev == sb.st_dev) 2297 break; 2298 } 2299 if (obj != NULL && name != NULL) { 2300 object_add_name(obj, name); 2301 free(path); 2302 close(fd); 2303 return obj; 2304 } 2305 if (flags & RTLD_LO_NOLOAD) { 2306 free(path); 2307 close(fd); 2308 return (NULL); 2309 } 2310 2311 /* First use of this object, so we must map it in */ 2312 obj = do_load_object(fd, name, path, &sb, flags); 2313 if (obj == NULL) 2314 free(path); 2315 close(fd); 2316 2317 return obj; 2318 } 2319 2320 static Obj_Entry * 2321 do_load_object(int fd, const char *name, char *path, struct stat *sbp, 2322 int flags) 2323 { 2324 Obj_Entry *obj; 2325 struct statfs fs; 2326 2327 /* 2328 * but first, make sure that environment variables haven't been 2329 * used to circumvent the noexec flag on a filesystem. 2330 */ 2331 if (dangerous_ld_env) { 2332 if (fstatfs(fd, &fs) != 0) { 2333 _rtld_error("Cannot fstatfs \"%s\"", printable_path(path)); 2334 return NULL; 2335 } 2336 if (fs.f_flags & MNT_NOEXEC) { 2337 _rtld_error("Cannot execute objects on %s\n", fs.f_mntonname); 2338 return NULL; 2339 } 2340 } 2341 dbg("loading \"%s\"", printable_path(path)); 2342 obj = map_object(fd, printable_path(path), sbp); 2343 if (obj == NULL) 2344 return NULL; 2345 2346 /* 2347 * If DT_SONAME is present in the object, digest_dynamic2 already 2348 * added it to the object names. 2349 */ 2350 if (name != NULL) 2351 object_add_name(obj, name); 2352 obj->path = path; 2353 digest_dynamic(obj, 0); 2354 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", obj->path, 2355 obj->valid_hash_sysv, obj->valid_hash_gnu, obj->dynsymcount); 2356 if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) == 2357 RTLD_LO_DLOPEN) { 2358 dbg("refusing to load non-loadable \"%s\"", obj->path); 2359 _rtld_error("Cannot dlopen non-loadable %s", obj->path); 2360 munmap(obj->mapbase, obj->mapsize); 2361 obj_free(obj); 2362 return (NULL); 2363 } 2364 2365 obj->dlopened = (flags & RTLD_LO_DLOPEN) != 0; 2366 TAILQ_INSERT_TAIL(&obj_list, obj, next); 2367 obj_count++; 2368 obj_loads++; 2369 linkmap_add(obj); /* for GDB & dlinfo() */ 2370 max_stack_flags |= obj->stack_flags; 2371 2372 dbg(" %p .. 
%p: %s", obj->mapbase, 2373 obj->mapbase + obj->mapsize - 1, obj->path); 2374 if (obj->textrel) 2375 dbg(" WARNING: %s has impure text", obj->path); 2376 LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0, 2377 obj->path); 2378 2379 return obj; 2380 } 2381 2382 static Obj_Entry * 2383 obj_from_addr(const void *addr) 2384 { 2385 Obj_Entry *obj; 2386 2387 TAILQ_FOREACH(obj, &obj_list, next) { 2388 if (obj->marker) 2389 continue; 2390 if (addr < (void *) obj->mapbase) 2391 continue; 2392 if (addr < (void *) (obj->mapbase + obj->mapsize)) 2393 return obj; 2394 } 2395 return NULL; 2396 } 2397 2398 static void 2399 preinit_main(void) 2400 { 2401 Elf_Addr *preinit_addr; 2402 int index; 2403 2404 preinit_addr = (Elf_Addr *)obj_main->preinit_array; 2405 if (preinit_addr == NULL) 2406 return; 2407 2408 for (index = 0; index < obj_main->preinit_array_num; index++) { 2409 if (preinit_addr[index] != 0 && preinit_addr[index] != 1) { 2410 dbg("calling preinit function for %s at %p", obj_main->path, 2411 (void *)preinit_addr[index]); 2412 LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index], 2413 0, 0, obj_main->path); 2414 call_init_pointer(obj_main, preinit_addr[index]); 2415 } 2416 } 2417 } 2418 2419 /* 2420 * Call the finalization functions for each of the objects in "list" 2421 * belonging to the DAG of "root" and referenced once. If NULL "root" 2422 * is specified, every finalization function will be called regardless 2423 * of the reference count and the list elements won't be freed. All of 2424 * the objects are expected to have non-NULL fini functions. 2425 */ 2426 static void 2427 objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate) 2428 { 2429 Objlist_Entry *elm; 2430 char *saved_msg; 2431 Elf_Addr *fini_addr; 2432 int index; 2433 2434 assert(root == NULL || root->refcount == 1); 2435 2436 if (root != NULL) 2437 root->doomed = true; 2438 2439 /* 2440 * Preserve the current error message since a fini function might 2441 * call into the dynamic linker and overwrite it. 2442 */ 2443 saved_msg = errmsg_save(); 2444 do { 2445 STAILQ_FOREACH(elm, list, link) { 2446 if (root != NULL && (elm->obj->refcount != 1 || 2447 objlist_find(&root->dagmembers, elm->obj) == NULL)) 2448 continue; 2449 /* Remove object from fini list to prevent recursive invocation. */ 2450 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link); 2451 /* Ensure that new references cannot be acquired. */ 2452 elm->obj->doomed = true; 2453 2454 hold_object(elm->obj); 2455 lock_release(rtld_bind_lock, lockstate); 2456 /* 2457 * It is legal to have both DT_FINI and DT_FINI_ARRAY defined. 2458 * When this happens, DT_FINI_ARRAY is processed first. 
2459 */ 2460 fini_addr = (Elf_Addr *)elm->obj->fini_array; 2461 if (fini_addr != NULL && elm->obj->fini_array_num > 0) { 2462 for (index = elm->obj->fini_array_num - 1; index >= 0; 2463 index--) { 2464 if (fini_addr[index] != 0 && fini_addr[index] != 1) { 2465 dbg("calling fini function for %s at %p", 2466 elm->obj->path, (void *)fini_addr[index]); 2467 LD_UTRACE(UTRACE_FINI_CALL, elm->obj, 2468 (void *)fini_addr[index], 0, 0, elm->obj->path); 2469 call_initfini_pointer(elm->obj, fini_addr[index]); 2470 } 2471 } 2472 } 2473 if (elm->obj->fini != (Elf_Addr)NULL) { 2474 dbg("calling fini function for %s at %p", elm->obj->path, 2475 (void *)elm->obj->fini); 2476 LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini, 2477 0, 0, elm->obj->path); 2478 call_initfini_pointer(elm->obj, elm->obj->fini); 2479 } 2480 wlock_acquire(rtld_bind_lock, lockstate); 2481 unhold_object(elm->obj); 2482 /* No need to free anything if process is going down. */ 2483 if (root != NULL) 2484 free(elm); 2485 /* 2486 * We must restart the list traversal after every fini call 2487 * because a dlclose() call from the fini function or from 2488 * another thread might have modified the reference counts. 2489 */ 2490 break; 2491 } 2492 } while (elm != NULL); 2493 errmsg_restore(saved_msg); 2494 } 2495 2496 /* 2497 * Call the initialization functions for each of the objects in 2498 * "list". All of the objects are expected to have non-NULL init 2499 * functions. 2500 */ 2501 static void 2502 objlist_call_init(Objlist *list, RtldLockState *lockstate) 2503 { 2504 Objlist_Entry *elm; 2505 Obj_Entry *obj; 2506 char *saved_msg; 2507 Elf_Addr *init_addr; 2508 int index; 2509 2510 /* 2511 * Clean init_scanned flag so that objects can be rechecked and 2512 * possibly initialized earlier if any of vectors called below 2513 * cause the change by using dlopen. 2514 */ 2515 TAILQ_FOREACH(obj, &obj_list, next) { 2516 if (obj->marker) 2517 continue; 2518 obj->init_scanned = false; 2519 } 2520 2521 /* 2522 * Preserve the current error message since an init function might 2523 * call into the dynamic linker and overwrite it. 2524 */ 2525 saved_msg = errmsg_save(); 2526 STAILQ_FOREACH(elm, list, link) { 2527 if (elm->obj->init_done) /* Initialized early. */ 2528 continue; 2529 /* 2530 * Race: other thread might try to use this object before current 2531 * one completes the initialization. Not much can be done here 2532 * without better locking. 2533 */ 2534 elm->obj->init_done = true; 2535 hold_object(elm->obj); 2536 lock_release(rtld_bind_lock, lockstate); 2537 2538 /* 2539 * It is legal to have both DT_INIT and DT_INIT_ARRAY defined. 2540 * When this happens, DT_INIT is processed first. 
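* The DT_INIT_ARRAY entries are then run in forward index order, again skipping sentinel entries of 0 and 1.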
2541 */ 2542 if (elm->obj->init != (Elf_Addr)NULL) { 2543 dbg("calling init function for %s at %p", elm->obj->path, 2544 (void *)elm->obj->init); 2545 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init, 2546 0, 0, elm->obj->path); 2547 call_initfini_pointer(elm->obj, elm->obj->init); 2548 } 2549 init_addr = (Elf_Addr *)elm->obj->init_array; 2550 if (init_addr != NULL) { 2551 for (index = 0; index < elm->obj->init_array_num; index++) { 2552 if (init_addr[index] != 0 && init_addr[index] != 1) { 2553 dbg("calling init function for %s at %p", elm->obj->path, 2554 (void *)init_addr[index]); 2555 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, 2556 (void *)init_addr[index], 0, 0, elm->obj->path); 2557 call_init_pointer(elm->obj, init_addr[index]); 2558 } 2559 } 2560 } 2561 wlock_acquire(rtld_bind_lock, lockstate); 2562 unhold_object(elm->obj); 2563 } 2564 errmsg_restore(saved_msg); 2565 } 2566 2567 static void 2568 objlist_clear(Objlist *list) 2569 { 2570 Objlist_Entry *elm; 2571 2572 while (!STAILQ_EMPTY(list)) { 2573 elm = STAILQ_FIRST(list); 2574 STAILQ_REMOVE_HEAD(list, link); 2575 free(elm); 2576 } 2577 } 2578 2579 static Objlist_Entry * 2580 objlist_find(Objlist *list, const Obj_Entry *obj) 2581 { 2582 Objlist_Entry *elm; 2583 2584 STAILQ_FOREACH(elm, list, link) 2585 if (elm->obj == obj) 2586 return elm; 2587 return NULL; 2588 } 2589 2590 static void 2591 objlist_init(Objlist *list) 2592 { 2593 STAILQ_INIT(list); 2594 } 2595 2596 static void 2597 objlist_push_head(Objlist *list, Obj_Entry *obj) 2598 { 2599 Objlist_Entry *elm; 2600 2601 elm = NEW(Objlist_Entry); 2602 elm->obj = obj; 2603 STAILQ_INSERT_HEAD(list, elm, link); 2604 } 2605 2606 static void 2607 objlist_push_tail(Objlist *list, Obj_Entry *obj) 2608 { 2609 Objlist_Entry *elm; 2610 2611 elm = NEW(Objlist_Entry); 2612 elm->obj = obj; 2613 STAILQ_INSERT_TAIL(list, elm, link); 2614 } 2615 2616 static void 2617 objlist_put_after(Objlist *list, Obj_Entry *listobj, Obj_Entry *obj) 2618 { 2619 Objlist_Entry *elm, *listelm; 2620 2621 STAILQ_FOREACH(listelm, list, link) { 2622 if (listelm->obj == listobj) 2623 break; 2624 } 2625 elm = NEW(Objlist_Entry); 2626 elm->obj = obj; 2627 if (listelm != NULL) 2628 STAILQ_INSERT_AFTER(list, listelm, elm, link); 2629 else 2630 STAILQ_INSERT_TAIL(list, elm, link); 2631 } 2632 2633 static void 2634 objlist_remove(Objlist *list, Obj_Entry *obj) 2635 { 2636 Objlist_Entry *elm; 2637 2638 if ((elm = objlist_find(list, obj)) != NULL) { 2639 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link); 2640 free(elm); 2641 } 2642 } 2643 2644 /* 2645 * Relocate dag rooted in the specified object. 2646 * Returns 0 on success, or -1 on failure. 2647 */ 2648 2649 static int 2650 relocate_object_dag(Obj_Entry *root, bool bind_now, Obj_Entry *rtldobj, 2651 int flags, RtldLockState *lockstate) 2652 { 2653 Objlist_Entry *elm; 2654 int error; 2655 2656 error = 0; 2657 STAILQ_FOREACH(elm, &root->dagmembers, link) { 2658 error = relocate_object(elm->obj, bind_now, rtldobj, flags, 2659 lockstate); 2660 if (error == -1) 2661 break; 2662 } 2663 return (error); 2664 } 2665 2666 /* 2667 * Prepare for, or clean after, relocating an object marked with 2668 * DT_TEXTREL or DF_TEXTREL. Before relocating, all read-only 2669 * segments are remapped read-write. After relocations are done, the 2670 * segment's permissions are returned back to the modes specified in 2671 * the phdrs. If any relocation happened, or always for wired 2672 * program, COW is triggered. 
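* Both the write-enable and the restore step operate on whole pages: the bounds passed to mprotect() are rounded out with trunc_page() and round_page().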
2673 */ 2674 static int 2675 reloc_textrel_prot(Obj_Entry *obj, bool before) 2676 { 2677 const Elf_Phdr *ph; 2678 void *base; 2679 size_t l, sz; 2680 int prot; 2681 2682 for (l = obj->phsize / sizeof(*ph), ph = obj->phdr; l > 0; 2683 l--, ph++) { 2684 if (ph->p_type != PT_LOAD || (ph->p_flags & PF_W) != 0) 2685 continue; 2686 base = obj->relocbase + trunc_page(ph->p_vaddr); 2687 sz = round_page(ph->p_vaddr + ph->p_filesz) - 2688 trunc_page(ph->p_vaddr); 2689 prot = convert_prot(ph->p_flags) | (before ? PROT_WRITE : 0); 2690 if (mprotect(base, sz, prot) == -1) { 2691 _rtld_error("%s: Cannot write-%sable text segment: %s", 2692 obj->path, before ? "en" : "dis", 2693 rtld_strerror(errno)); 2694 return (-1); 2695 } 2696 } 2697 return (0); 2698 } 2699 2700 /* 2701 * Relocate single object. 2702 * Returns 0 on success, or -1 on failure. 2703 */ 2704 static int 2705 relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj, 2706 int flags, RtldLockState *lockstate) 2707 { 2708 2709 if (obj->relocated) 2710 return (0); 2711 obj->relocated = true; 2712 if (obj != rtldobj) 2713 dbg("relocating \"%s\"", obj->path); 2714 2715 if (obj->symtab == NULL || obj->strtab == NULL || 2716 !(obj->valid_hash_sysv || obj->valid_hash_gnu)) { 2717 _rtld_error("%s: Shared object has no run-time symbol table", 2718 obj->path); 2719 return (-1); 2720 } 2721 2722 /* There are relocations to the write-protected text segment. */ 2723 if (obj->textrel && reloc_textrel_prot(obj, true) != 0) 2724 return (-1); 2725 2726 /* Process the non-PLT non-IFUNC relocations. */ 2727 if (reloc_non_plt(obj, rtldobj, flags, lockstate)) 2728 return (-1); 2729 2730 /* Re-protected the text segment. */ 2731 if (obj->textrel && reloc_textrel_prot(obj, false) != 0) 2732 return (-1); 2733 2734 /* Set the special PLT or GOT entries. */ 2735 init_pltgot(obj); 2736 2737 /* Process the PLT relocations. */ 2738 if (reloc_plt(obj) == -1) 2739 return (-1); 2740 /* Relocate the jump slots if we are doing immediate binding. */ 2741 if (obj->bind_now || bind_now) 2742 if (reloc_jmpslots(obj, flags, lockstate) == -1) 2743 return (-1); 2744 2745 /* 2746 * Process the non-PLT IFUNC relocations. The relocations are 2747 * processed in two phases, because IFUNC resolvers may 2748 * reference other symbols, which must be readily processed 2749 * before resolvers are called. 2750 */ 2751 if (obj->non_plt_gnu_ifunc && 2752 reloc_non_plt(obj, rtldobj, flags | SYMLOOK_IFUNC, lockstate)) 2753 return (-1); 2754 2755 if (!obj->mainprog && obj_enforce_relro(obj) == -1) 2756 return (-1); 2757 2758 /* 2759 * Set up the magic number and version in the Obj_Entry. These 2760 * were checked in the crt1.o from the original ElfKit, so we 2761 * set them for backward compatibility. 2762 */ 2763 obj->magic = RTLD_MAGIC; 2764 obj->version = RTLD_VERSION; 2765 2766 return (0); 2767 } 2768 2769 /* 2770 * Relocate newly-loaded shared objects. The argument is a pointer to 2771 * the Obj_Entry for the first such object. All objects from the first 2772 * to the end of the list of objects are relocated. Returns 0 on success, 2773 * or -1 on failure. 
2774 */ 2775 static int 2776 relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj, 2777 int flags, RtldLockState *lockstate) 2778 { 2779 Obj_Entry *obj; 2780 int error; 2781 2782 for (error = 0, obj = first; obj != NULL; 2783 obj = TAILQ_NEXT(obj, next)) { 2784 if (obj->marker) 2785 continue; 2786 error = relocate_object(obj, bind_now, rtldobj, flags, 2787 lockstate); 2788 if (error == -1) 2789 break; 2790 } 2791 return (error); 2792 } 2793 2794 /* 2795 * The handling of R_MACHINE_IRELATIVE relocations and jumpslots 2796 * referencing STT_GNU_IFUNC symbols is postponed till the other 2797 * relocations are done. The indirect functions specified as 2798 * ifunc are allowed to call other symbols, so we need to have 2799 * objects relocated before asking for resolution from indirects. 2800 * 2801 * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion, 2802 * instead of the usual lazy handling of PLT slots. It is 2803 * consistent with how GNU does it. 2804 */ 2805 static int 2806 resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags, 2807 RtldLockState *lockstate) 2808 { 2809 if (obj->irelative && reloc_iresolve(obj, lockstate) == -1) 2810 return (-1); 2811 if ((obj->bind_now || bind_now) && obj->gnu_ifunc && 2812 reloc_gnu_ifunc(obj, flags, lockstate) == -1) 2813 return (-1); 2814 return (0); 2815 } 2816 2817 static int 2818 resolve_objects_ifunc(Obj_Entry *first, bool bind_now, int flags, 2819 RtldLockState *lockstate) 2820 { 2821 Obj_Entry *obj; 2822 2823 for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) { 2824 if (obj->marker) 2825 continue; 2826 if (resolve_object_ifunc(obj, bind_now, flags, lockstate) == -1) 2827 return (-1); 2828 } 2829 return (0); 2830 } 2831 2832 static int 2833 initlist_objects_ifunc(Objlist *list, bool bind_now, int flags, 2834 RtldLockState *lockstate) 2835 { 2836 Objlist_Entry *elm; 2837 2838 STAILQ_FOREACH(elm, list, link) { 2839 if (resolve_object_ifunc(elm->obj, bind_now, flags, 2840 lockstate) == -1) 2841 return (-1); 2842 } 2843 return (0); 2844 } 2845 2846 /* 2847 * Cleanup procedure. It will be called (by the atexit mechanism) just 2848 * before the process exits. 2849 */ 2850 static void 2851 rtld_exit(void) 2852 { 2853 RtldLockState lockstate; 2854 2855 wlock_acquire(rtld_bind_lock, &lockstate); 2856 dbg("rtld_exit()"); 2857 objlist_call_fini(&list_fini, NULL, &lockstate); 2858 /* No need to remove the items from the list, since we are exiting. */ 2859 if (!libmap_disable) 2860 lm_fini(); 2861 lock_release(rtld_bind_lock, &lockstate); 2862 } 2863 2864 /* 2865 * Iterate over a search path, translate each element, and invoke the 2866 * callback on the result. 
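* Elements are separated by ':' or ';'; each element is translated through libmap (lm_findn()) before being handed to the callback, and enumeration stops at the first callback that returns a non-NULL result. search_library_path() below, for instance, passes try_library_path() as the callback to probe one directory per element.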
2867 */ 2868 static void * 2869 path_enumerate(const char *path, path_enum_proc callback, void *arg) 2870 { 2871 const char *trans; 2872 if (path == NULL) 2873 return (NULL); 2874 2875 path += strspn(path, ":;"); 2876 while (*path != '\0') { 2877 size_t len; 2878 char *res; 2879 2880 len = strcspn(path, ":;"); 2881 trans = lm_findn(NULL, path, len); 2882 if (trans) 2883 res = callback(trans, strlen(trans), arg); 2884 else 2885 res = callback(path, len, arg); 2886 2887 if (res != NULL) 2888 return (res); 2889 2890 path += len; 2891 path += strspn(path, ":;"); 2892 } 2893 2894 return (NULL); 2895 } 2896 2897 struct try_library_args { 2898 const char *name; 2899 size_t namelen; 2900 char *buffer; 2901 size_t buflen; 2902 }; 2903 2904 static void * 2905 try_library_path(const char *dir, size_t dirlen, void *param) 2906 { 2907 struct try_library_args *arg; 2908 2909 arg = param; 2910 if (*dir == '/' || trust) { 2911 char *pathname; 2912 2913 if (dirlen + 1 + arg->namelen + 1 > arg->buflen) 2914 return (NULL); 2915 2916 pathname = arg->buffer; 2917 strncpy(pathname, dir, dirlen); 2918 pathname[dirlen] = '/'; 2919 strcpy(pathname + dirlen + 1, arg->name); 2920 2921 dbg(" Trying \"%s\"", pathname); 2922 if (access(pathname, F_OK) == 0) { /* We found it */ 2923 pathname = xmalloc(dirlen + 1 + arg->namelen + 1); 2924 strcpy(pathname, arg->buffer); 2925 return (pathname); 2926 } 2927 } 2928 return (NULL); 2929 } 2930 2931 static char * 2932 search_library_path(const char *name, const char *path) 2933 { 2934 char *p; 2935 struct try_library_args arg; 2936 2937 if (path == NULL) 2938 return NULL; 2939 2940 arg.name = name; 2941 arg.namelen = strlen(name); 2942 arg.buffer = xmalloc(PATH_MAX); 2943 arg.buflen = PATH_MAX; 2944 2945 p = path_enumerate(path, try_library_path, &arg); 2946 2947 free(arg.buffer); 2948 2949 return (p); 2950 } 2951 2952 2953 /* 2954 * Finds the library with the given name using the directory descriptors 2955 * listed in the LD_LIBRARY_PATH_FDS environment variable. 2956 * 2957 * Returns a freshly-opened close-on-exec file descriptor for the library, 2958 * or -1 if the library cannot be found. 2959 */ 2960 static char * 2961 search_library_pathfds(const char *name, const char *path, int *fdp) 2962 { 2963 char *envcopy, *fdstr, *found, *last_token; 2964 size_t len; 2965 int dirfd, fd; 2966 2967 dbg("%s('%s', '%s', fdp)", __func__, name, path); 2968 2969 /* Don't load from user-specified libdirs into setuid binaries. */ 2970 if (!trust) 2971 return (NULL); 2972 2973 /* We can't do anything if LD_LIBRARY_PATH_FDS isn't set. */ 2974 if (path == NULL) 2975 return (NULL); 2976 2977 /* LD_LIBRARY_PATH_FDS only works with relative paths. */ 2978 if (name[0] == '/') { 2979 dbg("Absolute path (%s) passed to %s", name, __func__); 2980 return (NULL); 2981 } 2982 2983 /* 2984 * Use strtok_r() to walk the FD:FD:FD list. This requires a local 2985 * copy of the path, as strtok_r rewrites separator tokens 2986 * with '\0'. 
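* For example (hypothetical setting): LD_LIBRARY_PATH_FDS="3:4" makes the loader try openat(3, name) and then openat(4, name); a successful hit is recorded under a synthetic path of the form "#<fd>/<name>".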
2987 */ 2988 found = NULL; 2989 envcopy = xstrdup(path); 2990 for (fdstr = strtok_r(envcopy, ":", &last_token); fdstr != NULL; 2991 fdstr = strtok_r(NULL, ":", &last_token)) { 2992 dirfd = parse_libdir(fdstr); 2993 if (dirfd < 0) 2994 break; 2995 fd = __sys_openat(dirfd, name, O_RDONLY | O_CLOEXEC | O_VERIFY); 2996 if (fd >= 0) { 2997 *fdp = fd; 2998 len = strlen(fdstr) + strlen(name) + 3; 2999 found = xmalloc(len); 3000 if (rtld_snprintf(found, len, "#%d/%s", dirfd, name) < 0) { 3001 _rtld_error("error generating '%d/%s'", 3002 dirfd, name); 3003 rtld_die(); 3004 } 3005 dbg("open('%s') => %d", found, fd); 3006 break; 3007 } 3008 } 3009 free(envcopy); 3010 3011 return (found); 3012 } 3013 3014 3015 int 3016 dlclose(void *handle) 3017 { 3018 Obj_Entry *root; 3019 RtldLockState lockstate; 3020 3021 wlock_acquire(rtld_bind_lock, &lockstate); 3022 root = dlcheck(handle); 3023 if (root == NULL) { 3024 lock_release(rtld_bind_lock, &lockstate); 3025 return -1; 3026 } 3027 LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount, 3028 root->path); 3029 3030 /* Unreference the object and its dependencies. */ 3031 root->dl_refcount--; 3032 3033 if (root->refcount == 1) { 3034 /* 3035 * The object will be no longer referenced, so we must unload it. 3036 * First, call the fini functions. 3037 */ 3038 objlist_call_fini(&list_fini, root, &lockstate); 3039 3040 unref_dag(root); 3041 3042 /* Finish cleaning up the newly-unreferenced objects. */ 3043 GDB_STATE(RT_DELETE,&root->linkmap); 3044 unload_object(root); 3045 GDB_STATE(RT_CONSISTENT,NULL); 3046 } else 3047 unref_dag(root); 3048 3049 LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL); 3050 lock_release(rtld_bind_lock, &lockstate); 3051 return 0; 3052 } 3053 3054 char * 3055 dlerror(void) 3056 { 3057 char *msg = error_message; 3058 error_message = NULL; 3059 return msg; 3060 } 3061 3062 /* 3063 * This function is deprecated and has no effect. 3064 */ 3065 void 3066 dllockinit(void *context, 3067 void *(*lock_create)(void *context), 3068 void (*rlock_acquire)(void *lock), 3069 void (*wlock_acquire)(void *lock), 3070 void (*lock_release)(void *lock), 3071 void (*lock_destroy)(void *lock), 3072 void (*context_destroy)(void *context)) 3073 { 3074 static void *cur_context; 3075 static void (*cur_context_destroy)(void *); 3076 3077 /* Just destroy the context from the previous call, if necessary. */ 3078 if (cur_context_destroy != NULL) 3079 cur_context_destroy(cur_context); 3080 cur_context = context; 3081 cur_context_destroy = context_destroy; 3082 } 3083 3084 void * 3085 dlopen(const char *name, int mode) 3086 { 3087 3088 return (rtld_dlopen(name, -1, mode)); 3089 } 3090 3091 void * 3092 fdlopen(int fd, int mode) 3093 { 3094 3095 return (rtld_dlopen(NULL, fd, mode)); 3096 } 3097 3098 static void * 3099 rtld_dlopen(const char *name, int fd, int mode) 3100 { 3101 RtldLockState lockstate; 3102 int lo_flags; 3103 3104 LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name); 3105 ld_tracing = (mode & RTLD_TRACE) == 0 ? 
NULL : "1"; 3106 if (ld_tracing != NULL) { 3107 rlock_acquire(rtld_bind_lock, &lockstate); 3108 if (sigsetjmp(lockstate.env, 0) != 0) 3109 lock_upgrade(rtld_bind_lock, &lockstate); 3110 environ = (char **)*get_program_var_addr("environ", &lockstate); 3111 lock_release(rtld_bind_lock, &lockstate); 3112 } 3113 lo_flags = RTLD_LO_DLOPEN; 3114 if (mode & RTLD_NODELETE) 3115 lo_flags |= RTLD_LO_NODELETE; 3116 if (mode & RTLD_NOLOAD) 3117 lo_flags |= RTLD_LO_NOLOAD; 3118 if (ld_tracing != NULL) 3119 lo_flags |= RTLD_LO_TRACE; 3120 3121 return (dlopen_object(name, fd, obj_main, lo_flags, 3122 mode & (RTLD_MODEMASK | RTLD_GLOBAL), NULL)); 3123 } 3124 3125 static void 3126 dlopen_cleanup(Obj_Entry *obj) 3127 { 3128 3129 obj->dl_refcount--; 3130 unref_dag(obj); 3131 if (obj->refcount == 0) 3132 unload_object(obj); 3133 } 3134 3135 static Obj_Entry * 3136 dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags, 3137 int mode, RtldLockState *lockstate) 3138 { 3139 Obj_Entry *old_obj_tail; 3140 Obj_Entry *obj; 3141 Objlist initlist; 3142 RtldLockState mlockstate; 3143 int result; 3144 3145 objlist_init(&initlist); 3146 3147 if (lockstate == NULL && !(lo_flags & RTLD_LO_EARLY)) { 3148 wlock_acquire(rtld_bind_lock, &mlockstate); 3149 lockstate = &mlockstate; 3150 } 3151 GDB_STATE(RT_ADD,NULL); 3152 3153 old_obj_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q)); 3154 obj = NULL; 3155 if (name == NULL && fd == -1) { 3156 obj = obj_main; 3157 obj->refcount++; 3158 } else { 3159 obj = load_object(name, fd, refobj, lo_flags); 3160 } 3161 3162 if (obj) { 3163 obj->dl_refcount++; 3164 if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL) 3165 objlist_push_tail(&list_global, obj); 3166 if (globallist_next(old_obj_tail) != NULL) { 3167 /* We loaded something new. */ 3168 assert(globallist_next(old_obj_tail) == obj); 3169 result = load_needed_objects(obj, 3170 lo_flags & (RTLD_LO_DLOPEN | RTLD_LO_EARLY)); 3171 init_dag(obj); 3172 ref_dag(obj); 3173 if (result != -1) 3174 result = rtld_verify_versions(&obj->dagmembers); 3175 if (result != -1 && ld_tracing) 3176 goto trace; 3177 if (result == -1 || relocate_object_dag(obj, 3178 (mode & RTLD_MODEMASK) == RTLD_NOW, &obj_rtld, 3179 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0, 3180 lockstate) == -1) { 3181 dlopen_cleanup(obj); 3182 obj = NULL; 3183 } else if (lo_flags & RTLD_LO_EARLY) { 3184 /* 3185 * Do not call the init functions for early loaded 3186 * filtees. The image is still not initialized enough 3187 * for them to work. 3188 * 3189 * Our object is found by the global object list and 3190 * will be ordered among all init calls done right 3191 * before transferring control to main. 3192 */ 3193 } else { 3194 /* Make list of init functions to call. */ 3195 initlist_add_objects(obj, obj, &initlist); 3196 } 3197 /* 3198 * Process all no_delete or global objects here, given 3199 * them own DAGs to prevent their dependencies from being 3200 * unloaded. This has to be done after we have loaded all 3201 * of the dependencies, so that we do not miss any. 3202 */ 3203 if (obj != NULL) 3204 process_z(obj); 3205 } else { 3206 /* 3207 * Bump the reference counts for objects on this DAG. If 3208 * this is the first dlopen() call for the object that was 3209 * already loaded as a dependency, initialize the dag 3210 * starting at it. 
3211 */ 3212 init_dag(obj); 3213 ref_dag(obj); 3214 3215 if ((lo_flags & RTLD_LO_TRACE) != 0) 3216 goto trace; 3217 } 3218 if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 || 3219 obj->z_nodelete) && !obj->ref_nodel) { 3220 dbg("obj %s nodelete", obj->path); 3221 ref_dag(obj); 3222 obj->z_nodelete = obj->ref_nodel = true; 3223 } 3224 } 3225 3226 LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0, 3227 name); 3228 GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL); 3229 3230 if (!(lo_flags & RTLD_LO_EARLY)) { 3231 map_stacks_exec(lockstate); 3232 } 3233 3234 if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW, 3235 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0, 3236 lockstate) == -1) { 3237 objlist_clear(&initlist); 3238 dlopen_cleanup(obj); 3239 if (lockstate == &mlockstate) 3240 lock_release(rtld_bind_lock, lockstate); 3241 return (NULL); 3242 } 3243 3244 if (!(lo_flags & RTLD_LO_EARLY)) { 3245 /* Call the init functions. */ 3246 objlist_call_init(&initlist, lockstate); 3247 } 3248 objlist_clear(&initlist); 3249 if (lockstate == &mlockstate) 3250 lock_release(rtld_bind_lock, lockstate); 3251 return obj; 3252 trace: 3253 trace_loaded_objects(obj); 3254 if (lockstate == &mlockstate) 3255 lock_release(rtld_bind_lock, lockstate); 3256 exit(0); 3257 } 3258 3259 static void * 3260 do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve, 3261 int flags) 3262 { 3263 DoneList donelist; 3264 const Obj_Entry *obj, *defobj; 3265 const Elf_Sym *def; 3266 SymLook req; 3267 RtldLockState lockstate; 3268 tls_index ti; 3269 void *sym; 3270 int res; 3271 3272 def = NULL; 3273 defobj = NULL; 3274 symlook_init(&req, name); 3275 req.ventry = ve; 3276 req.flags = flags | SYMLOOK_IN_PLT; 3277 req.lockstate = &lockstate; 3278 3279 LD_UTRACE(UTRACE_DLSYM_START, handle, NULL, 0, 0, name); 3280 rlock_acquire(rtld_bind_lock, &lockstate); 3281 if (sigsetjmp(lockstate.env, 0) != 0) 3282 lock_upgrade(rtld_bind_lock, &lockstate); 3283 if (handle == NULL || handle == RTLD_NEXT || 3284 handle == RTLD_DEFAULT || handle == RTLD_SELF) { 3285 3286 if ((obj = obj_from_addr(retaddr)) == NULL) { 3287 _rtld_error("Cannot determine caller's shared object"); 3288 lock_release(rtld_bind_lock, &lockstate); 3289 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name); 3290 return NULL; 3291 } 3292 if (handle == NULL) { /* Just the caller's shared object. */ 3293 res = symlook_obj(&req, obj); 3294 if (res == 0) { 3295 def = req.sym_out; 3296 defobj = req.defobj_out; 3297 } 3298 } else if (handle == RTLD_NEXT || /* Objects after caller's */ 3299 handle == RTLD_SELF) { /* ... caller included */ 3300 if (handle == RTLD_NEXT) 3301 obj = globallist_next(obj); 3302 for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) { 3303 if (obj->marker) 3304 continue; 3305 res = symlook_obj(&req, obj); 3306 if (res == 0) { 3307 if (def == NULL || 3308 ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) { 3309 def = req.sym_out; 3310 defobj = req.defobj_out; 3311 if (ELF_ST_BIND(def->st_info) != STB_WEAK) 3312 break; 3313 } 3314 } 3315 } 3316 /* 3317 * Search the dynamic linker itself, and possibly resolve the 3318 * symbol from there. This is how the application links to 3319 * dynamic linker services such as dlopen. 
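* (Illustrative use, not part of rtld itself: an interposing library typically uses dlsym(RTLD_NEXT, "open") from its own wrapper to find the definition it shadows and forward to it, relying on this search order.)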
3320 */ 3321 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { 3322 res = symlook_obj(&req, &obj_rtld); 3323 if (res == 0) { 3324 def = req.sym_out; 3325 defobj = req.defobj_out; 3326 } 3327 } 3328 } else { 3329 assert(handle == RTLD_DEFAULT); 3330 res = symlook_default(&req, obj); 3331 if (res == 0) { 3332 defobj = req.defobj_out; 3333 def = req.sym_out; 3334 } 3335 } 3336 } else { 3337 if ((obj = dlcheck(handle)) == NULL) { 3338 lock_release(rtld_bind_lock, &lockstate); 3339 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name); 3340 return NULL; 3341 } 3342 3343 donelist_init(&donelist); 3344 if (obj->mainprog) { 3345 /* Handle obtained by dlopen(NULL, ...) implies global scope. */ 3346 res = symlook_global(&req, &donelist); 3347 if (res == 0) { 3348 def = req.sym_out; 3349 defobj = req.defobj_out; 3350 } 3351 /* 3352 * Search the dynamic linker itself, and possibly resolve the 3353 * symbol from there. This is how the application links to 3354 * dynamic linker services such as dlopen. 3355 */ 3356 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { 3357 res = symlook_obj(&req, &obj_rtld); 3358 if (res == 0) { 3359 def = req.sym_out; 3360 defobj = req.defobj_out; 3361 } 3362 } 3363 } 3364 else { 3365 /* Search the whole DAG rooted at the given object. */ 3366 res = symlook_list(&req, &obj->dagmembers, &donelist); 3367 if (res == 0) { 3368 def = req.sym_out; 3369 defobj = req.defobj_out; 3370 } 3371 } 3372 } 3373 3374 if (def != NULL) { 3375 lock_release(rtld_bind_lock, &lockstate); 3376 3377 /* 3378 * The value required by the caller is derived from the value 3379 * of the symbol. this is simply the relocated value of the 3380 * symbol. 3381 */ 3382 if (ELF_ST_TYPE(def->st_info) == STT_FUNC) 3383 sym = make_function_pointer(def, defobj); 3384 else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) 3385 sym = rtld_resolve_ifunc(defobj, def); 3386 else if (ELF_ST_TYPE(def->st_info) == STT_TLS) { 3387 ti.ti_module = defobj->tlsindex; 3388 ti.ti_offset = def->st_value; 3389 sym = __tls_get_addr(&ti); 3390 } else 3391 sym = defobj->relocbase + def->st_value; 3392 LD_UTRACE(UTRACE_DLSYM_STOP, handle, sym, 0, 0, name); 3393 return (sym); 3394 } 3395 3396 _rtld_error("Undefined symbol \"%s\"", name); 3397 lock_release(rtld_bind_lock, &lockstate); 3398 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name); 3399 return NULL; 3400 } 3401 3402 void * 3403 dlsym(void *handle, const char *name) 3404 { 3405 return do_dlsym(handle, name, __builtin_return_address(0), NULL, 3406 SYMLOOK_DLSYM); 3407 } 3408 3409 dlfunc_t 3410 dlfunc(void *handle, const char *name) 3411 { 3412 union { 3413 void *d; 3414 dlfunc_t f; 3415 } rv; 3416 3417 rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL, 3418 SYMLOOK_DLSYM); 3419 return (rv.f); 3420 } 3421 3422 void * 3423 dlvsym(void *handle, const char *name, const char *version) 3424 { 3425 Ver_Entry ventry; 3426 3427 ventry.name = version; 3428 ventry.file = NULL; 3429 ventry.hash = elf_hash(version); 3430 ventry.flags= 0; 3431 return do_dlsym(handle, name, __builtin_return_address(0), &ventry, 3432 SYMLOOK_DLSYM); 3433 } 3434 3435 int 3436 _rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info) 3437 { 3438 const Obj_Entry *obj; 3439 RtldLockState lockstate; 3440 3441 rlock_acquire(rtld_bind_lock, &lockstate); 3442 obj = obj_from_addr(addr); 3443 if (obj == NULL) { 3444 _rtld_error("No shared object contains address"); 3445 lock_release(rtld_bind_lock, &lockstate); 3446 return (0); 3447 } 3448 rtld_fill_dl_phdr_info(obj, 
phdr_info); 3449 lock_release(rtld_bind_lock, &lockstate); 3450 return (1); 3451 } 3452 3453 int 3454 dladdr(const void *addr, Dl_info *info) 3455 { 3456 const Obj_Entry *obj; 3457 const Elf_Sym *def; 3458 void *symbol_addr; 3459 unsigned long symoffset; 3460 RtldLockState lockstate; 3461 3462 rlock_acquire(rtld_bind_lock, &lockstate); 3463 obj = obj_from_addr(addr); 3464 if (obj == NULL) { 3465 _rtld_error("No shared object contains address"); 3466 lock_release(rtld_bind_lock, &lockstate); 3467 return 0; 3468 } 3469 info->dli_fname = obj->path; 3470 info->dli_fbase = obj->mapbase; 3471 info->dli_saddr = (void *)0; 3472 info->dli_sname = NULL; 3473 3474 /* 3475 * Walk the symbol list looking for the symbol whose address is 3476 * closest to the address sent in. 3477 */ 3478 for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) { 3479 def = obj->symtab + symoffset; 3480 3481 /* 3482 * For skip the symbol if st_shndx is either SHN_UNDEF or 3483 * SHN_COMMON. 3484 */ 3485 if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON) 3486 continue; 3487 3488 /* 3489 * If the symbol is greater than the specified address, or if it 3490 * is further away from addr than the current nearest symbol, 3491 * then reject it. 3492 */ 3493 symbol_addr = obj->relocbase + def->st_value; 3494 if (symbol_addr > addr || symbol_addr < info->dli_saddr) 3495 continue; 3496 3497 /* Update our idea of the nearest symbol. */ 3498 info->dli_sname = obj->strtab + def->st_name; 3499 info->dli_saddr = symbol_addr; 3500 3501 /* Exact match? */ 3502 if (info->dli_saddr == addr) 3503 break; 3504 } 3505 lock_release(rtld_bind_lock, &lockstate); 3506 return 1; 3507 } 3508 3509 int 3510 dlinfo(void *handle, int request, void *p) 3511 { 3512 const Obj_Entry *obj; 3513 RtldLockState lockstate; 3514 int error; 3515 3516 rlock_acquire(rtld_bind_lock, &lockstate); 3517 3518 if (handle == NULL || handle == RTLD_SELF) { 3519 void *retaddr; 3520 3521 retaddr = __builtin_return_address(0); /* __GNUC__ only */ 3522 if ((obj = obj_from_addr(retaddr)) == NULL) 3523 _rtld_error("Cannot determine caller's shared object"); 3524 } else 3525 obj = dlcheck(handle); 3526 3527 if (obj == NULL) { 3528 lock_release(rtld_bind_lock, &lockstate); 3529 return (-1); 3530 } 3531 3532 error = 0; 3533 switch (request) { 3534 case RTLD_DI_LINKMAP: 3535 *((struct link_map const **)p) = &obj->linkmap; 3536 break; 3537 case RTLD_DI_ORIGIN: 3538 error = rtld_dirname(obj->path, p); 3539 break; 3540 3541 case RTLD_DI_SERINFOSIZE: 3542 case RTLD_DI_SERINFO: 3543 error = do_search_info(obj, request, (struct dl_serinfo *)p); 3544 break; 3545 3546 default: 3547 _rtld_error("Invalid request %d passed to dlinfo()", request); 3548 error = -1; 3549 } 3550 3551 lock_release(rtld_bind_lock, &lockstate); 3552 3553 return (error); 3554 } 3555 3556 static void 3557 rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info) 3558 { 3559 3560 phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase; 3561 phdr_info->dlpi_name = obj->path; 3562 phdr_info->dlpi_phdr = obj->phdr; 3563 phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]); 3564 phdr_info->dlpi_tls_modid = obj->tlsindex; 3565 phdr_info->dlpi_tls_data = obj->tlsinit; 3566 phdr_info->dlpi_adds = obj_loads; 3567 phdr_info->dlpi_subs = obj_loads - obj_count; 3568 } 3569 3570 int 3571 dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param) 3572 { 3573 struct dl_phdr_info phdr_info; 3574 Obj_Entry *obj, marker; 3575 RtldLockState bind_lockstate, phdr_lockstate; 3576 int error; 3577 3578 
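/* A marker entry is linked in after the current object so that the iteration position survives while the bind lock is dropped around the user callback. */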
init_marker(&marker); 3579 error = 0; 3580 3581 wlock_acquire(rtld_phdr_lock, &phdr_lockstate); 3582 wlock_acquire(rtld_bind_lock, &bind_lockstate); 3583 for (obj = globallist_curr(TAILQ_FIRST(&obj_list)); obj != NULL;) { 3584 TAILQ_INSERT_AFTER(&obj_list, obj, &marker, next); 3585 rtld_fill_dl_phdr_info(obj, &phdr_info); 3586 hold_object(obj); 3587 lock_release(rtld_bind_lock, &bind_lockstate); 3588 3589 error = callback(&phdr_info, sizeof phdr_info, param); 3590 3591 wlock_acquire(rtld_bind_lock, &bind_lockstate); 3592 unhold_object(obj); 3593 obj = globallist_next(&marker); 3594 TAILQ_REMOVE(&obj_list, &marker, next); 3595 if (error != 0) { 3596 lock_release(rtld_bind_lock, &bind_lockstate); 3597 lock_release(rtld_phdr_lock, &phdr_lockstate); 3598 return (error); 3599 } 3600 } 3601 3602 if (error == 0) { 3603 rtld_fill_dl_phdr_info(&obj_rtld, &phdr_info); 3604 lock_release(rtld_bind_lock, &bind_lockstate); 3605 error = callback(&phdr_info, sizeof(phdr_info), param); 3606 } 3607 lock_release(rtld_phdr_lock, &phdr_lockstate); 3608 return (error); 3609 } 3610 3611 static void * 3612 fill_search_info(const char *dir, size_t dirlen, void *param) 3613 { 3614 struct fill_search_info_args *arg; 3615 3616 arg = param; 3617 3618 if (arg->request == RTLD_DI_SERINFOSIZE) { 3619 arg->serinfo->dls_cnt ++; 3620 arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1; 3621 } else { 3622 struct dl_serpath *s_entry; 3623 3624 s_entry = arg->serpath; 3625 s_entry->dls_name = arg->strspace; 3626 s_entry->dls_flags = arg->flags; 3627 3628 strncpy(arg->strspace, dir, dirlen); 3629 arg->strspace[dirlen] = '\0'; 3630 3631 arg->strspace += dirlen + 1; 3632 arg->serpath++; 3633 } 3634 3635 return (NULL); 3636 } 3637 3638 static int 3639 do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info) 3640 { 3641 struct dl_serinfo _info; 3642 struct fill_search_info_args args; 3643 3644 args.request = RTLD_DI_SERINFOSIZE; 3645 args.serinfo = &_info; 3646 3647 _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath); 3648 _info.dls_cnt = 0; 3649 3650 path_enumerate(obj->rpath, fill_search_info, &args); 3651 path_enumerate(ld_library_path, fill_search_info, &args); 3652 path_enumerate(obj->runpath, fill_search_info, &args); 3653 path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args); 3654 if (!obj->z_nodeflib) 3655 path_enumerate(ld_standard_library_path, fill_search_info, &args); 3656 3657 3658 if (request == RTLD_DI_SERINFOSIZE) { 3659 info->dls_size = _info.dls_size; 3660 info->dls_cnt = _info.dls_cnt; 3661 return (0); 3662 } 3663 3664 if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) { 3665 _rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()"); 3666 return (-1); 3667 } 3668 3669 args.request = RTLD_DI_SERINFO; 3670 args.serinfo = info; 3671 args.serpath = &info->dls_serpath[0]; 3672 args.strspace = (char *)&info->dls_serpath[_info.dls_cnt]; 3673 3674 args.flags = LA_SER_RUNPATH; 3675 if (path_enumerate(obj->rpath, fill_search_info, &args) != NULL) 3676 return (-1); 3677 3678 args.flags = LA_SER_LIBPATH; 3679 if (path_enumerate(ld_library_path, fill_search_info, &args) != NULL) 3680 return (-1); 3681 3682 args.flags = LA_SER_RUNPATH; 3683 if (path_enumerate(obj->runpath, fill_search_info, &args) != NULL) 3684 return (-1); 3685 3686 args.flags = LA_SER_CONFIG; 3687 if (path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args) 3688 != NULL) 3689 return (-1); 3690 3691 args.flags = LA_SER_DEFAULT; 3692 if (!obj->z_nodeflib && 3693 
path_enumerate(ld_standard_library_path, fill_search_info, &args) != NULL) 3694 return (-1); 3695 return (0); 3696 } 3697 3698 static int 3699 rtld_dirname(const char *path, char *bname) 3700 { 3701 const char *endp; 3702 3703 /* Empty or NULL string gets treated as "." */ 3704 if (path == NULL || *path == '\0') { 3705 bname[0] = '.'; 3706 bname[1] = '\0'; 3707 return (0); 3708 } 3709 3710 /* Strip trailing slashes */ 3711 endp = path + strlen(path) - 1; 3712 while (endp > path && *endp == '/') 3713 endp--; 3714 3715 /* Find the start of the dir */ 3716 while (endp > path && *endp != '/') 3717 endp--; 3718 3719 /* Either the dir is "/" or there are no slashes */ 3720 if (endp == path) { 3721 bname[0] = *endp == '/' ? '/' : '.'; 3722 bname[1] = '\0'; 3723 return (0); 3724 } else { 3725 do { 3726 endp--; 3727 } while (endp > path && *endp == '/'); 3728 } 3729 3730 if (endp - path + 2 > PATH_MAX) 3731 { 3732 _rtld_error("Filename is too long: %s", path); 3733 return(-1); 3734 } 3735 3736 strncpy(bname, path, endp - path + 1); 3737 bname[endp - path + 1] = '\0'; 3738 return (0); 3739 } 3740 3741 static int 3742 rtld_dirname_abs(const char *path, char *base) 3743 { 3744 char *last; 3745 3746 if (realpath(path, base) == NULL) 3747 return (-1); 3748 dbg("%s -> %s", path, base); 3749 last = strrchr(base, '/'); 3750 if (last == NULL) 3751 return (-1); 3752 if (last != base) 3753 *last = '\0'; 3754 return (0); 3755 } 3756 3757 static void 3758 linkmap_add(Obj_Entry *obj) 3759 { 3760 struct link_map *l = &obj->linkmap; 3761 struct link_map *prev; 3762 3763 obj->linkmap.l_name = obj->path; 3764 obj->linkmap.l_addr = obj->mapbase; 3765 obj->linkmap.l_ld = obj->dynamic; 3766 #ifdef __mips__ 3767 /* GDB needs load offset on MIPS to use the symbols */ 3768 obj->linkmap.l_offs = obj->relocbase; 3769 #endif 3770 3771 if (r_debug.r_map == NULL) { 3772 r_debug.r_map = l; 3773 return; 3774 } 3775 3776 /* 3777 * Scan to the end of the list, but not past the entry for the 3778 * dynamic linker, which we want to keep at the very end. 3779 */ 3780 for (prev = r_debug.r_map; 3781 prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap; 3782 prev = prev->l_next) 3783 ; 3784 3785 /* Link in the new entry. */ 3786 l->l_prev = prev; 3787 l->l_next = prev->l_next; 3788 if (l->l_next != NULL) 3789 l->l_next->l_prev = l; 3790 prev->l_next = l; 3791 } 3792 3793 static void 3794 linkmap_delete(Obj_Entry *obj) 3795 { 3796 struct link_map *l = &obj->linkmap; 3797 3798 if (l->l_prev == NULL) { 3799 if ((r_debug.r_map = l->l_next) != NULL) 3800 l->l_next->l_prev = NULL; 3801 return; 3802 } 3803 3804 if ((l->l_prev->l_next = l->l_next) != NULL) 3805 l->l_next->l_prev = l->l_prev; 3806 } 3807 3808 /* 3809 * Function for the debugger to set a breakpoint on to gain control. 3810 * 3811 * The two parameters allow the debugger to easily find and determine 3812 * what the runtime loader is doing and to whom it is doing it. 3813 * 3814 * When the loadhook trap is hit (r_debug_state, set at program 3815 * initialization), the arguments can be found on the stack: 3816 * 3817 * +8 struct link_map *m 3818 * +4 struct r_debug *rd 3819 * +0 RetAddr 3820 */ 3821 void 3822 r_debug_state(struct r_debug* rd, struct link_map *m) 3823 { 3824 /* 3825 * The following is a hack to force the compiler to emit calls to 3826 * this function, even when optimizing. If the function is empty, 3827 * the compiler is not obliged to emit any code for calls to it, 3828 * even when marked __noinline. However, gdb depends on those 3829 * calls being made. 
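* (__compiler_membar() is a compiler-level barrier only; it emits no instructions but gives the body a side effect the optimizer must preserve.)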
3830 */ 3831 __compiler_membar(); 3832 } 3833 3834 /* 3835 * A function called after init routines have completed. This can be used to 3836 * break before a program's entry routine is called, and can be used when 3837 * main is not available in the symbol table. 3838 */ 3839 void 3840 _r_debug_postinit(struct link_map *m) 3841 { 3842 3843 /* See r_debug_state(). */ 3844 __compiler_membar(); 3845 } 3846 3847 static void 3848 release_object(Obj_Entry *obj) 3849 { 3850 3851 if (obj->holdcount > 0) { 3852 obj->unholdfree = true; 3853 return; 3854 } 3855 munmap(obj->mapbase, obj->mapsize); 3856 linkmap_delete(obj); 3857 obj_free(obj); 3858 } 3859 3860 /* 3861 * Get address of the pointer variable in the main program. 3862 * Prefer non-weak symbol over the weak one. 3863 */ 3864 static const void ** 3865 get_program_var_addr(const char *name, RtldLockState *lockstate) 3866 { 3867 SymLook req; 3868 DoneList donelist; 3869 3870 symlook_init(&req, name); 3871 req.lockstate = lockstate; 3872 donelist_init(&donelist); 3873 if (symlook_global(&req, &donelist) != 0) 3874 return (NULL); 3875 if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC) 3876 return ((const void **)make_function_pointer(req.sym_out, 3877 req.defobj_out)); 3878 else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC) 3879 return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out)); 3880 else 3881 return ((const void **)(req.defobj_out->relocbase + 3882 req.sym_out->st_value)); 3883 } 3884 3885 /* 3886 * Set a pointer variable in the main program to the given value. This 3887 * is used to set key variables such as "environ" before any of the 3888 * init functions are called. 3889 */ 3890 static void 3891 set_program_var(const char *name, const void *value) 3892 { 3893 const void **addr; 3894 3895 if ((addr = get_program_var_addr(name, NULL)) != NULL) { 3896 dbg("\"%s\": *%p <-- %p", name, addr, value); 3897 *addr = value; 3898 } 3899 } 3900 3901 /* 3902 * Search the global objects, including dependencies and main object, 3903 * for the given symbol. 3904 */ 3905 static int 3906 symlook_global(SymLook *req, DoneList *donelist) 3907 { 3908 SymLook req1; 3909 const Objlist_Entry *elm; 3910 int res; 3911 3912 symlook_init_from_req(&req1, req); 3913 3914 /* Search all objects loaded at program start up. */ 3915 if (req->defobj_out == NULL || 3916 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) { 3917 res = symlook_list(&req1, &list_main, donelist); 3918 if (res == 0 && (req->defobj_out == NULL || 3919 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) { 3920 req->sym_out = req1.sym_out; 3921 req->defobj_out = req1.defobj_out; 3922 assert(req->defobj_out != NULL); 3923 } 3924 } 3925 3926 /* Search all DAGs whose roots are RTLD_GLOBAL objects. */ 3927 STAILQ_FOREACH(elm, &list_global, link) { 3928 if (req->defobj_out != NULL && 3929 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK) 3930 break; 3931 res = symlook_list(&req1, &elm->obj->dagmembers, donelist); 3932 if (res == 0 && (req->defobj_out == NULL || 3933 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) { 3934 req->sym_out = req1.sym_out; 3935 req->defobj_out = req1.defobj_out; 3936 assert(req->defobj_out != NULL); 3937 } 3938 } 3939 3940 return (req->sym_out != NULL ? 0 : ESRCH); 3941 } 3942 3943 /* 3944 * Given a symbol name in a referencing object, find the corresponding 3945 * definition of the symbol. Returns a pointer to the symbol, or NULL if 3946 * no definition was found. 
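/*
 * linkmap_add() and linkmap_delete() above maintain the struct link_map
 * chain published through r_debug.r_map, which debuggers consume after
 * stopping on the r_debug_state() breakpoint.  A process can also inspect
 * the same chain itself through dlinfo(3) with RTLD_DI_LINKMAP.  Minimal
 * sketch below, illustrative only (not part of rtld).
 */
#if 0	/* illustrative example; never compiled into rtld */
#include <dlfcn.h>
#include <link.h>
#include <stdio.h>

static void
dump_link_map(void *handle)
{
	struct link_map *lm;

	/* Obtain the link_map entry backing this dlopen() handle. */
	if (dlinfo(handle, RTLD_DI_LINKMAP, &lm) == -1)
		return;
	/* Rewind to the head of the chain, then print every object. */
	while (lm->l_prev != NULL)
		lm = lm->l_prev;
	for (; lm != NULL; lm = lm->l_next)
		printf("%p %s\n", (void *)lm->l_addr, lm->l_name);
}
#endif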
Returns a pointer to the Obj_Entry of the
 * defining object via the reference parameter DEFOBJ_OUT.
 */
static int
symlook_default(SymLook *req, const Obj_Entry *refobj)
{
    DoneList donelist;
    const Objlist_Entry *elm;
    SymLook req1;
    int res;

    donelist_init(&donelist);
    symlook_init_from_req(&req1, req);

    /* Look first in the referencing object if linked symbolically. */
    if (refobj->symbolic && !donelist_check(&donelist, refobj)) {
        res = symlook_obj(&req1, refobj);
        if (res == 0) {
            req->sym_out = req1.sym_out;
            req->defobj_out = req1.defobj_out;
            assert(req->defobj_out != NULL);
        }
    }

    symlook_global(req, &donelist);

    /* Search all dlopened DAGs containing the referencing object. */
    STAILQ_FOREACH(elm, &refobj->dldags, link) {
        if (req->sym_out != NULL &&
          ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
            break;
        res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
        if (res == 0 && (req->sym_out == NULL ||
          ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
            req->sym_out = req1.sym_out;
            req->defobj_out = req1.defobj_out;
            assert(req->defobj_out != NULL);
        }
    }

    /*
     * Search the dynamic linker itself, and possibly resolve the
     * symbol from there.  This is how the application links to
     * dynamic linker services such as dlopen.
     */
    if (req->sym_out == NULL ||
      ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
        res = symlook_obj(&req1, &obj_rtld);
        if (res == 0) {
            req->sym_out = req1.sym_out;
            req->defobj_out = req1.defobj_out;
            assert(req->defobj_out != NULL);
        }
    }

    return (req->sym_out != NULL ? 0 : ESRCH);
}

static int
symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
{
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    const Objlist_Entry *elm;
    SymLook req1;
    int res;

    def = NULL;
    defobj = NULL;
    STAILQ_FOREACH(elm, objlist, link) {
        if (donelist_check(dlp, elm->obj))
            continue;
        symlook_init_from_req(&req1, req);
        if ((res = symlook_obj(&req1, elm->obj)) == 0) {
            if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
                def = req1.sym_out;
                defobj = req1.defobj_out;
                if (ELF_ST_BIND(def->st_info) != STB_WEAK)
                    break;
            }
        }
    }
    if (def != NULL) {
        req->sym_out = def;
        req->defobj_out = defobj;
        return (0);
    }
    return (ESRCH);
}
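/*
 * symlook_global() and symlook_default() above implement the scopes that
 * back dlsym(3)'s pseudo-handles.  The sketch below shows how those scopes
 * look from the caller's side; it is illustrative only, the symbol and
 * version strings are just placeholders, and dlvsym() is an extension that
 * may not exist everywhere.
 */
#if 0	/* illustrative example; never compiled into rtld */
#include <dlfcn.h>
#include <stdio.h>

static void
lookup_examples(void)
{
	void *def, *next, *ver;

	/* Whole global scope, in the same order normal binding uses. */
	def = dlsym(RTLD_DEFAULT, "getpid");

	/*
	 * Only objects after the one containing this call; the usual way
	 * an interposing library finds the definition it wraps.
	 */
	next = dlsym(RTLD_NEXT, "getpid");

	/* Ask for one specific symbol version (placeholder version name). */
	ver = dlvsym(RTLD_DEFAULT, "getpid", "FBSD_1.0");

	printf("%p %p %p\n", def, next, ver);
}
#endif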
/*
 * Search the chain of DAGs pointed to by the given Needed_Entry
 * for a symbol of the given name.  Each DAG is scanned completely
 * before advancing to the next one.  Returns a pointer to the symbol,
 * or NULL if no definition was found.
 */
static int
symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
{
    const Elf_Sym *def;
    const Needed_Entry *n;
    const Obj_Entry *defobj;
    SymLook req1;
    int res;

    def = NULL;
    defobj = NULL;
    symlook_init_from_req(&req1, req);
    for (n = needed; n != NULL; n = n->next) {
        if (n->obj == NULL ||
            (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
            continue;
        if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
            def = req1.sym_out;
            defobj = req1.defobj_out;
            if (ELF_ST_BIND(def->st_info) != STB_WEAK)
                break;
        }
    }
    if (def != NULL) {
        req->sym_out = def;
        req->defobj_out = defobj;
        return (0);
    }
    return (ESRCH);
}

/*
 * Search the symbol table of a single shared object for a symbol of
 * the given name and version, if requested.  Returns a pointer to the
 * symbol, or NULL if no definition was found.  If the object is a
 * filter, return the filtered symbol from the filtee.
 *
 * The symbol's hash value is passed in for efficiency reasons; that
 * eliminates many recomputations of the hash value.
 */
int
symlook_obj(SymLook *req, const Obj_Entry *obj)
{
    DoneList donelist;
    SymLook req1;
    int flags, res, mres;

    /*
     * If there is at least one valid hash at this point, we prefer to
     * use the faster GNU version if available.
     */
    if (obj->valid_hash_gnu)
        mres = symlook_obj1_gnu(req, obj);
    else if (obj->valid_hash_sysv)
        mres = symlook_obj1_sysv(req, obj);
    else
        return (EINVAL);

    if (mres == 0) {
        if (obj->needed_filtees != NULL) {
            flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
            load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
            donelist_init(&donelist);
            symlook_init_from_req(&req1, req);
            res = symlook_needed(&req1, obj->needed_filtees, &donelist);
            if (res == 0) {
                req->sym_out = req1.sym_out;
                req->defobj_out = req1.defobj_out;
            }
            return (res);
        }
        if (obj->needed_aux_filtees != NULL) {
            flags = (req->flags & SYMLOOK_EARLY) ?
RTLD_LO_EARLY : 0; 4115 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate); 4116 donelist_init(&donelist); 4117 symlook_init_from_req(&req1, req); 4118 res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist); 4119 if (res == 0) { 4120 req->sym_out = req1.sym_out; 4121 req->defobj_out = req1.defobj_out; 4122 return (res); 4123 } 4124 } 4125 } 4126 return (mres); 4127 } 4128 4129 /* Symbol match routine common to both hash functions */ 4130 static bool 4131 matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result, 4132 const unsigned long symnum) 4133 { 4134 Elf_Versym verndx; 4135 const Elf_Sym *symp; 4136 const char *strp; 4137 4138 symp = obj->symtab + symnum; 4139 strp = obj->strtab + symp->st_name; 4140 4141 switch (ELF_ST_TYPE(symp->st_info)) { 4142 case STT_FUNC: 4143 case STT_NOTYPE: 4144 case STT_OBJECT: 4145 case STT_COMMON: 4146 case STT_GNU_IFUNC: 4147 if (symp->st_value == 0) 4148 return (false); 4149 /* fallthrough */ 4150 case STT_TLS: 4151 if (symp->st_shndx != SHN_UNDEF) 4152 break; 4153 #ifndef __mips__ 4154 else if (((req->flags & SYMLOOK_IN_PLT) == 0) && 4155 (ELF_ST_TYPE(symp->st_info) == STT_FUNC)) 4156 break; 4157 /* fallthrough */ 4158 #endif 4159 default: 4160 return (false); 4161 } 4162 if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0) 4163 return (false); 4164 4165 if (req->ventry == NULL) { 4166 if (obj->versyms != NULL) { 4167 verndx = VER_NDX(obj->versyms[symnum]); 4168 if (verndx > obj->vernum) { 4169 _rtld_error( 4170 "%s: symbol %s references wrong version %d", 4171 obj->path, obj->strtab + symnum, verndx); 4172 return (false); 4173 } 4174 /* 4175 * If we are not called from dlsym (i.e. this 4176 * is a normal relocation from unversioned 4177 * binary), accept the symbol immediately if 4178 * it happens to have first version after this 4179 * shared object became versioned. Otherwise, 4180 * if symbol is versioned and not hidden, 4181 * remember it. If it is the only symbol with 4182 * this name exported by the shared object, it 4183 * will be returned as a match by the calling 4184 * function. If symbol is global (verndx < 2) 4185 * accept it unconditionally. 4186 */ 4187 if ((req->flags & SYMLOOK_DLSYM) == 0 && 4188 verndx == VER_NDX_GIVEN) { 4189 result->sym_out = symp; 4190 return (true); 4191 } 4192 else if (verndx >= VER_NDX_GIVEN) { 4193 if ((obj->versyms[symnum] & VER_NDX_HIDDEN) 4194 == 0) { 4195 if (result->vsymp == NULL) 4196 result->vsymp = symp; 4197 result->vcount++; 4198 } 4199 return (false); 4200 } 4201 } 4202 result->sym_out = symp; 4203 return (true); 4204 } 4205 if (obj->versyms == NULL) { 4206 if (object_match_name(obj, req->ventry->name)) { 4207 _rtld_error("%s: object %s should provide version %s " 4208 "for symbol %s", obj_rtld.path, obj->path, 4209 req->ventry->name, obj->strtab + symnum); 4210 return (false); 4211 } 4212 } else { 4213 verndx = VER_NDX(obj->versyms[symnum]); 4214 if (verndx > obj->vernum) { 4215 _rtld_error("%s: symbol %s references wrong version %d", 4216 obj->path, obj->strtab + symnum, verndx); 4217 return (false); 4218 } 4219 if (obj->vertab[verndx].hash != req->ventry->hash || 4220 strcmp(obj->vertab[verndx].name, req->ventry->name)) { 4221 /* 4222 * Version does not match. Look if this is a 4223 * global symbol and if it is not hidden. If 4224 * global symbol (verndx < 2) is available, 4225 * use it. Do not return symbol if we are 4226 * called by dlvsym, because dlvsym looks for 4227 * a specific version and default one is not 4228 * what dlvsym wants. 
4229 */ 4230 if ((req->flags & SYMLOOK_DLSYM) || 4231 (verndx >= VER_NDX_GIVEN) || 4232 (obj->versyms[symnum] & VER_NDX_HIDDEN)) 4233 return (false); 4234 } 4235 } 4236 result->sym_out = symp; 4237 return (true); 4238 } 4239 4240 /* 4241 * Search for symbol using SysV hash function. 4242 * obj->buckets is known not to be NULL at this point; the test for this was 4243 * performed with the obj->valid_hash_sysv assignment. 4244 */ 4245 static int 4246 symlook_obj1_sysv(SymLook *req, const Obj_Entry *obj) 4247 { 4248 unsigned long symnum; 4249 Sym_Match_Result matchres; 4250 4251 matchres.sym_out = NULL; 4252 matchres.vsymp = NULL; 4253 matchres.vcount = 0; 4254 4255 for (symnum = obj->buckets[req->hash % obj->nbuckets]; 4256 symnum != STN_UNDEF; symnum = obj->chains[symnum]) { 4257 if (symnum >= obj->nchains) 4258 return (ESRCH); /* Bad object */ 4259 4260 if (matched_symbol(req, obj, &matchres, symnum)) { 4261 req->sym_out = matchres.sym_out; 4262 req->defobj_out = obj; 4263 return (0); 4264 } 4265 } 4266 if (matchres.vcount == 1) { 4267 req->sym_out = matchres.vsymp; 4268 req->defobj_out = obj; 4269 return (0); 4270 } 4271 return (ESRCH); 4272 } 4273 4274 /* Search for symbol using GNU hash function */ 4275 static int 4276 symlook_obj1_gnu(SymLook *req, const Obj_Entry *obj) 4277 { 4278 Elf_Addr bloom_word; 4279 const Elf32_Word *hashval; 4280 Elf32_Word bucket; 4281 Sym_Match_Result matchres; 4282 unsigned int h1, h2; 4283 unsigned long symnum; 4284 4285 matchres.sym_out = NULL; 4286 matchres.vsymp = NULL; 4287 matchres.vcount = 0; 4288 4289 /* Pick right bitmask word from Bloom filter array */ 4290 bloom_word = obj->bloom_gnu[(req->hash_gnu / __ELF_WORD_SIZE) & 4291 obj->maskwords_bm_gnu]; 4292 4293 /* Calculate modulus word size of gnu hash and its derivative */ 4294 h1 = req->hash_gnu & (__ELF_WORD_SIZE - 1); 4295 h2 = ((req->hash_gnu >> obj->shift2_gnu) & (__ELF_WORD_SIZE - 1)); 4296 4297 /* Filter out the "definitely not in set" queries */ 4298 if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0) 4299 return (ESRCH); 4300 4301 /* Locate hash chain and corresponding value element*/ 4302 bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu]; 4303 if (bucket == 0) 4304 return (ESRCH); 4305 hashval = &obj->chain_zero_gnu[bucket]; 4306 do { 4307 if (((*hashval ^ req->hash_gnu) >> 1) == 0) { 4308 symnum = hashval - obj->chain_zero_gnu; 4309 if (matched_symbol(req, obj, &matchres, symnum)) { 4310 req->sym_out = matchres.sym_out; 4311 req->defobj_out = obj; 4312 return (0); 4313 } 4314 } 4315 } while ((*hashval++ & 1) == 0); 4316 if (matchres.vcount == 1) { 4317 req->sym_out = matchres.vsymp; 4318 req->defobj_out = obj; 4319 return (0); 4320 } 4321 return (ESRCH); 4322 } 4323 4324 static void 4325 trace_loaded_objects(Obj_Entry *obj) 4326 { 4327 char *fmt1, *fmt2, *fmt, *main_local, *list_containers; 4328 int c; 4329 4330 if ((main_local = getenv(_LD("TRACE_LOADED_OBJECTS_PROGNAME"))) == NULL) 4331 main_local = ""; 4332 4333 if ((fmt1 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT1"))) == NULL) 4334 fmt1 = "\t%o => %p (%x)\n"; 4335 4336 if ((fmt2 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT2"))) == NULL) 4337 fmt2 = "\t%o (%x)\n"; 4338 4339 list_containers = getenv(_LD("TRACE_LOADED_OBJECTS_ALL")); 4340 4341 for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) { 4342 Needed_Entry *needed; 4343 char *name, *path; 4344 bool is_lib; 4345 4346 if (obj->marker) 4347 continue; 4348 if (list_containers && obj->needed != NULL) 4349 rtld_printf("%s:\n", obj->path); 4350 for (needed = obj->needed; needed; 
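/*
 * symlook_obj1_sysv() and symlook_obj1_gnu() above consume hash values
 * that symlook_init() precomputes with elf_hash() and gnu_hash().  For
 * reference, the sketch below shows the standard algorithms those two
 * helpers implement (the classic SysV ELF hash and the GNU hash);
 * illustrative only, not rtld's own definitions.
 */
#if 0	/* reference sketch of the two hash algorithms; illustrative only */
#include <stdint.h>

/* SysV ELF hash, used with DT_HASH buckets/chains. */
static uint32_t
sysv_elf_hash(const char *name)
{
	uint32_t h, g;

	h = 0;
	for (; *name != '\0'; name++) {
		h = (h << 4) + (unsigned char)*name;
		if ((g = h & 0xf0000000u) != 0)
			h ^= g >> 24;
		h &= ~g;
	}
	return (h);
}

/* GNU hash, used with the DT_GNU_HASH Bloom filter, buckets and chains. */
static uint32_t
dj_gnu_hash(const char *name)
{
	uint32_t h;

	h = 5381;
	for (; *name != '\0'; name++)
		h = h * 33 + (unsigned char)*name;
	return (h);
}
#endif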
needed = needed->next) { 4351 if (needed->obj != NULL) { 4352 if (needed->obj->traced && !list_containers) 4353 continue; 4354 needed->obj->traced = true; 4355 path = needed->obj->path; 4356 } else 4357 path = "not found"; 4358 4359 name = (char *)obj->strtab + needed->name; 4360 is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */ 4361 4362 fmt = is_lib ? fmt1 : fmt2; 4363 while ((c = *fmt++) != '\0') { 4364 switch (c) { 4365 default: 4366 rtld_putchar(c); 4367 continue; 4368 case '\\': 4369 switch (c = *fmt) { 4370 case '\0': 4371 continue; 4372 case 'n': 4373 rtld_putchar('\n'); 4374 break; 4375 case 't': 4376 rtld_putchar('\t'); 4377 break; 4378 } 4379 break; 4380 case '%': 4381 switch (c = *fmt) { 4382 case '\0': 4383 continue; 4384 case '%': 4385 default: 4386 rtld_putchar(c); 4387 break; 4388 case 'A': 4389 rtld_putstr(main_local); 4390 break; 4391 case 'a': 4392 rtld_putstr(obj_main->path); 4393 break; 4394 case 'o': 4395 rtld_putstr(name); 4396 break; 4397 #if 0 4398 case 'm': 4399 rtld_printf("%d", sodp->sod_major); 4400 break; 4401 case 'n': 4402 rtld_printf("%d", sodp->sod_minor); 4403 break; 4404 #endif 4405 case 'p': 4406 rtld_putstr(path); 4407 break; 4408 case 'x': 4409 rtld_printf("%p", needed->obj ? needed->obj->mapbase : 4410 0); 4411 break; 4412 } 4413 break; 4414 } 4415 ++fmt; 4416 } 4417 } 4418 } 4419 } 4420 4421 /* 4422 * Unload a dlopened object and its dependencies from memory and from 4423 * our data structures. It is assumed that the DAG rooted in the 4424 * object has already been unreferenced, and that the object has a 4425 * reference count of 0. 4426 */ 4427 static void 4428 unload_object(Obj_Entry *root) 4429 { 4430 Obj_Entry marker, *obj, *next; 4431 4432 assert(root->refcount == 0); 4433 4434 /* 4435 * Pass over the DAG removing unreferenced objects from 4436 * appropriate lists. 4437 */ 4438 unlink_object(root); 4439 4440 /* Unmap all objects that are no longer referenced. */ 4441 for (obj = TAILQ_FIRST(&obj_list); obj != NULL; obj = next) { 4442 next = TAILQ_NEXT(obj, next); 4443 if (obj->marker || obj->refcount != 0) 4444 continue; 4445 LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase, 4446 obj->mapsize, 0, obj->path); 4447 dbg("unloading \"%s\"", obj->path); 4448 /* 4449 * Unlink the object now to prevent new references from 4450 * being acquired while the bind lock is dropped in 4451 * recursive dlclose() invocations. 4452 */ 4453 TAILQ_REMOVE(&obj_list, obj, next); 4454 obj_count--; 4455 4456 if (obj->filtees_loaded) { 4457 if (next != NULL) { 4458 init_marker(&marker); 4459 TAILQ_INSERT_BEFORE(next, &marker, next); 4460 unload_filtees(obj); 4461 next = TAILQ_NEXT(&marker, next); 4462 TAILQ_REMOVE(&obj_list, &marker, next); 4463 } else 4464 unload_filtees(obj); 4465 } 4466 release_object(obj); 4467 } 4468 } 4469 4470 static void 4471 unlink_object(Obj_Entry *root) 4472 { 4473 Objlist_Entry *elm; 4474 4475 if (root->refcount == 0) { 4476 /* Remove the object from the RTLD_GLOBAL list. */ 4477 objlist_remove(&list_global, root); 4478 4479 /* Remove the object from all objects' DAG lists. 
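/*
 * trace_loaded_objects() above is the machinery behind ldd(1): with the
 * trace environment variable set, rtld prints how each DT_NEEDED entry
 * resolved instead of running the program, and the *_FMT1/_FMT2, _ALL and
 * _PROGNAME variables adjust the output.  Launcher-style sketch below;
 * illustrative only, the target path is a placeholder, and ldd(1) itself
 * sets LD_TRACE_LOADED_OBJECTS the same way.
 */
#if 0	/* illustrative example; never compiled into rtld */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	/* Ask rtld to print needed objects and exit instead of running. */
	setenv("LD_TRACE_LOADED_OBJECTS", "1", 1);
	/* List every container's dependencies, not just the main object's. */
	setenv("LD_TRACE_LOADED_OBJECTS_ALL", "1", 1);
	/* Per-library line: %o name, %p resolved path, %x map base. */
	setenv("LD_TRACE_LOADED_OBJECTS_FMT1", "\t%o => %p (%x)\n", 1);

	execl("/bin/ls", "ls", (char *)NULL);	/* placeholder target */
	perror("execl");
	return (1);
}
#endif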
*/ 4480 STAILQ_FOREACH(elm, &root->dagmembers, link) { 4481 objlist_remove(&elm->obj->dldags, root); 4482 if (elm->obj != root) 4483 unlink_object(elm->obj); 4484 } 4485 } 4486 } 4487 4488 static void 4489 ref_dag(Obj_Entry *root) 4490 { 4491 Objlist_Entry *elm; 4492 4493 assert(root->dag_inited); 4494 STAILQ_FOREACH(elm, &root->dagmembers, link) 4495 elm->obj->refcount++; 4496 } 4497 4498 static void 4499 unref_dag(Obj_Entry *root) 4500 { 4501 Objlist_Entry *elm; 4502 4503 assert(root->dag_inited); 4504 STAILQ_FOREACH(elm, &root->dagmembers, link) 4505 elm->obj->refcount--; 4506 } 4507 4508 /* 4509 * Common code for MD __tls_get_addr(). 4510 */ 4511 static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline; 4512 static void * 4513 tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset) 4514 { 4515 Elf_Addr *newdtv, *dtv; 4516 RtldLockState lockstate; 4517 int to_copy; 4518 4519 dtv = *dtvp; 4520 /* Check dtv generation in case new modules have arrived */ 4521 if (dtv[0] != tls_dtv_generation) { 4522 wlock_acquire(rtld_bind_lock, &lockstate); 4523 newdtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr)); 4524 to_copy = dtv[1]; 4525 if (to_copy > tls_max_index) 4526 to_copy = tls_max_index; 4527 memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr)); 4528 newdtv[0] = tls_dtv_generation; 4529 newdtv[1] = tls_max_index; 4530 free(dtv); 4531 lock_release(rtld_bind_lock, &lockstate); 4532 dtv = *dtvp = newdtv; 4533 } 4534 4535 /* Dynamically allocate module TLS if necessary */ 4536 if (dtv[index + 1] == 0) { 4537 /* Signal safe, wlock will block out signals. */ 4538 wlock_acquire(rtld_bind_lock, &lockstate); 4539 if (!dtv[index + 1]) 4540 dtv[index + 1] = (Elf_Addr)allocate_module_tls(index); 4541 lock_release(rtld_bind_lock, &lockstate); 4542 } 4543 return ((void *)(dtv[index + 1] + offset)); 4544 } 4545 4546 void * 4547 tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset) 4548 { 4549 Elf_Addr *dtv; 4550 4551 dtv = *dtvp; 4552 /* Check dtv generation in case new modules have arrived */ 4553 if (__predict_true(dtv[0] == tls_dtv_generation && 4554 dtv[index + 1] != 0)) 4555 return ((void *)(dtv[index + 1] + offset)); 4556 return (tls_get_addr_slow(dtvp, index, offset)); 4557 } 4558 4559 #if defined(__aarch64__) || defined(__arm__) || defined(__mips__) || \ 4560 defined(__powerpc__) || defined(__riscv__) 4561 4562 /* 4563 * Allocate Static TLS using the Variant I method. 4564 */ 4565 void * 4566 allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign) 4567 { 4568 Obj_Entry *obj; 4569 char *tcb; 4570 Elf_Addr **tls; 4571 Elf_Addr *dtv; 4572 Elf_Addr addr; 4573 int i; 4574 4575 if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE) 4576 return (oldtcb); 4577 4578 assert(tcbsize >= TLS_TCB_SIZE); 4579 tcb = xcalloc(1, tls_static_space - TLS_TCB_SIZE + tcbsize); 4580 tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE); 4581 4582 if (oldtcb != NULL) { 4583 memcpy(tls, oldtcb, tls_static_space); 4584 free(oldtcb); 4585 4586 /* Adjust the DTV. 
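/*
 * tls_get_addr_common() and tls_get_addr_slow() above follow the usual
 * DTV (dynamic thread vector) scheme: dtv[0] holds the generation count,
 * dtv[1] the number of module slots, and dtv[index + 1] the base address
 * of the module's TLS block, allocated lazily on first use.  The toy,
 * single-threaded model below shows just that fast-path/slow-path split;
 * illustrative only, with the generation check and locking omitted.
 */
#if 0	/* toy model of the DTV lookup; illustrative only */
#include <stdint.h>
#include <stdlib.h>

#define TOY_MAX_MODULES	8

static uintptr_t toy_dtv[TOY_MAX_MODULES + 2];	    /* [0] gen, [1] count */
static size_t toy_block_size[TOY_MAX_MODULES + 2];  /* per-module sizes */

static void *
toy_tls_get_addr(int index, size_t offset)
{
	if (toy_dtv[index + 1] == 0) {
		/* First touch of this module: allocate and zero its block. */
		toy_dtv[index + 1] =
		    (uintptr_t)calloc(1, toy_block_size[index + 1]);
	}
	return ((void *)(toy_dtv[index + 1] + offset));
}
#endif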
*/ 4587 dtv = tls[0]; 4588 for (i = 0; i < dtv[1]; i++) { 4589 if (dtv[i+2] >= (Elf_Addr)oldtcb && 4590 dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) { 4591 dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tls; 4592 } 4593 } 4594 } else { 4595 dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr)); 4596 tls[0] = dtv; 4597 dtv[0] = tls_dtv_generation; 4598 dtv[1] = tls_max_index; 4599 4600 for (obj = globallist_curr(objs); obj != NULL; 4601 obj = globallist_next(obj)) { 4602 if (obj->tlsoffset > 0) { 4603 addr = (Elf_Addr)tls + obj->tlsoffset; 4604 if (obj->tlsinitsize > 0) 4605 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize); 4606 if (obj->tlssize > obj->tlsinitsize) 4607 memset((void*) (addr + obj->tlsinitsize), 0, 4608 obj->tlssize - obj->tlsinitsize); 4609 dtv[obj->tlsindex + 1] = addr; 4610 } 4611 } 4612 } 4613 4614 return (tcb); 4615 } 4616 4617 void 4618 free_tls(void *tcb, size_t tcbsize, size_t tcbalign) 4619 { 4620 Elf_Addr *dtv; 4621 Elf_Addr tlsstart, tlsend; 4622 int dtvsize, i; 4623 4624 assert(tcbsize >= TLS_TCB_SIZE); 4625 4626 tlsstart = (Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE; 4627 tlsend = tlsstart + tls_static_space; 4628 4629 dtv = *(Elf_Addr **)tlsstart; 4630 dtvsize = dtv[1]; 4631 for (i = 0; i < dtvsize; i++) { 4632 if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) { 4633 free((void*)dtv[i+2]); 4634 } 4635 } 4636 free(dtv); 4637 free(tcb); 4638 } 4639 4640 #endif 4641 4642 #if defined(__i386__) || defined(__amd64__) || defined(__sparc64__) 4643 4644 /* 4645 * Allocate Static TLS using the Variant II method. 4646 */ 4647 void * 4648 allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign) 4649 { 4650 Obj_Entry *obj; 4651 size_t size, ralign; 4652 char *tls; 4653 Elf_Addr *dtv, *olddtv; 4654 Elf_Addr segbase, oldsegbase, addr; 4655 int i; 4656 4657 ralign = tcbalign; 4658 if (tls_static_max_align > ralign) 4659 ralign = tls_static_max_align; 4660 size = round(tls_static_space, ralign) + round(tcbsize, ralign); 4661 4662 assert(tcbsize >= 2*sizeof(Elf_Addr)); 4663 tls = malloc_aligned(size, ralign); 4664 dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr)); 4665 4666 segbase = (Elf_Addr)(tls + round(tls_static_space, ralign)); 4667 ((Elf_Addr*)segbase)[0] = segbase; 4668 ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv; 4669 4670 dtv[0] = tls_dtv_generation; 4671 dtv[1] = tls_max_index; 4672 4673 if (oldtls) { 4674 /* 4675 * Copy the static TLS block over whole. 4676 */ 4677 oldsegbase = (Elf_Addr) oldtls; 4678 memcpy((void *)(segbase - tls_static_space), 4679 (const void *)(oldsegbase - tls_static_space), 4680 tls_static_space); 4681 4682 /* 4683 * If any dynamic TLS blocks have been created tls_get_addr(), 4684 * move them over. 4685 */ 4686 olddtv = ((Elf_Addr**)oldsegbase)[1]; 4687 for (i = 0; i < olddtv[1]; i++) { 4688 if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) { 4689 dtv[i+2] = olddtv[i+2]; 4690 olddtv[i+2] = 0; 4691 } 4692 } 4693 4694 /* 4695 * We assume that this block was the one we created with 4696 * allocate_initial_tls(). 
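/*
 * The Variant II allocator above sizes its block as
 * round(tls_static_space, ralign) + round(tcbsize, ralign), where round()
 * is the usual round-up-to-a-multiple helper.  A small worked sketch of
 * that arithmetic follows; the macro names are local to the sketch and
 * not rtld's.
 */
#if 0	/* illustrative sketch of the round-up arithmetic */
#include <assert.h>
#include <stddef.h>

#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))		/* any a > 0 */
#define ROUNDUP2(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))	/* a = 2^k */

static void
roundup_check(void)
{
	/* 40 bytes of static TLS with 16-byte alignment rounds up to 48. */
	assert(ROUNDUP((size_t)40, (size_t)16) == 48);
	assert(ROUNDUP2((size_t)40, (size_t)16) == 48);
	/* Already-aligned sizes are left unchanged. */
	assert(ROUNDUP((size_t)48, (size_t)16) == 48);
}
#endif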
4697 */ 4698 free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr)); 4699 } else { 4700 for (obj = objs; obj != NULL; obj = TAILQ_NEXT(obj, next)) { 4701 if (obj->marker || obj->tlsoffset == 0) 4702 continue; 4703 addr = segbase - obj->tlsoffset; 4704 memset((void*) (addr + obj->tlsinitsize), 4705 0, obj->tlssize - obj->tlsinitsize); 4706 if (obj->tlsinit) 4707 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize); 4708 dtv[obj->tlsindex + 1] = addr; 4709 } 4710 } 4711 4712 return (void*) segbase; 4713 } 4714 4715 void 4716 free_tls(void *tls, size_t tcbsize, size_t tcbalign) 4717 { 4718 Elf_Addr* dtv; 4719 size_t size, ralign; 4720 int dtvsize, i; 4721 Elf_Addr tlsstart, tlsend; 4722 4723 /* 4724 * Figure out the size of the initial TLS block so that we can 4725 * find stuff which ___tls_get_addr() allocated dynamically. 4726 */ 4727 ralign = tcbalign; 4728 if (tls_static_max_align > ralign) 4729 ralign = tls_static_max_align; 4730 size = round(tls_static_space, ralign); 4731 4732 dtv = ((Elf_Addr**)tls)[1]; 4733 dtvsize = dtv[1]; 4734 tlsend = (Elf_Addr) tls; 4735 tlsstart = tlsend - size; 4736 for (i = 0; i < dtvsize; i++) { 4737 if (dtv[i + 2] != 0 && (dtv[i + 2] < tlsstart || dtv[i + 2] > tlsend)) { 4738 free_aligned((void *)dtv[i + 2]); 4739 } 4740 } 4741 4742 free_aligned((void *)tlsstart); 4743 free((void*) dtv); 4744 } 4745 4746 #endif 4747 4748 /* 4749 * Allocate TLS block for module with given index. 4750 */ 4751 void * 4752 allocate_module_tls(int index) 4753 { 4754 Obj_Entry* obj; 4755 char* p; 4756 4757 TAILQ_FOREACH(obj, &obj_list, next) { 4758 if (obj->marker) 4759 continue; 4760 if (obj->tlsindex == index) 4761 break; 4762 } 4763 if (!obj) { 4764 _rtld_error("Can't find module with TLS index %d", index); 4765 rtld_die(); 4766 } 4767 4768 p = malloc_aligned(obj->tlssize, obj->tlsalign); 4769 memcpy(p, obj->tlsinit, obj->tlsinitsize); 4770 memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize); 4771 4772 return p; 4773 } 4774 4775 bool 4776 allocate_tls_offset(Obj_Entry *obj) 4777 { 4778 size_t off; 4779 4780 if (obj->tls_done) 4781 return true; 4782 4783 if (obj->tlssize == 0) { 4784 obj->tls_done = true; 4785 return true; 4786 } 4787 4788 if (tls_last_offset == 0) 4789 off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign); 4790 else 4791 off = calculate_tls_offset(tls_last_offset, tls_last_size, 4792 obj->tlssize, obj->tlsalign); 4793 4794 /* 4795 * If we have already fixed the size of the static TLS block, we 4796 * must stay within that size. When allocating the static TLS, we 4797 * leave a small amount of space spare to be used for dynamically 4798 * loading modules which use static TLS. 4799 */ 4800 if (tls_static_space != 0) { 4801 if (calculate_tls_end(off, obj->tlssize) > tls_static_space) 4802 return false; 4803 } else if (obj->tlsalign > tls_static_max_align) { 4804 tls_static_max_align = obj->tlsalign; 4805 } 4806 4807 tls_last_offset = obj->tlsoffset = off; 4808 tls_last_size = obj->tlssize; 4809 obj->tls_done = true; 4810 4811 return true; 4812 } 4813 4814 void 4815 free_tls_offset(Obj_Entry *obj) 4816 { 4817 4818 /* 4819 * If we were the last thing to allocate out of the static TLS 4820 * block, we give our space back to the 'allocator'. This is a 4821 * simplistic workaround to allow libGL.so.1 to be loaded and 4822 * unloaded multiple times. 
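/*
 * This file carries two allocate_tls()/free_tls() pairs: Variant I above
 * (aarch64, arm, mips, powerpc, riscv) places the TCB at the low end with
 * the module blocks above it at positive offsets, while Variant II (i386,
 * amd64, sparc64) places the TCB at the high end with the static blocks
 * below it at negative offsets.  The schematic and the two helpers below
 * only restate how a module's block address is derived in each case;
 * illustrative, with field names simplified.
 *
 *   Variant I:   tls_base -> [ TCB | module 1 | module 2 | ... ]
 *                block = tls_base + tlsoffset
 *   Variant II:  [ ... | module 2 | module 1 | TCB ] <- segbase
 *                block = segbase - tlsoffset
 */
#if 0	/* illustrative sketch; not rtld code */
#include <stddef.h>

static void *
variant1_block(char *tls_base, size_t tlsoffset)
{
	return (tls_base + tlsoffset);		/* positive offset */
}

static void *
variant2_block(char *segbase, size_t tlsoffset)
{
	return (segbase - tlsoffset);		/* negative offset */
}
#endif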
4823 */ 4824 if (calculate_tls_end(obj->tlsoffset, obj->tlssize) 4825 == calculate_tls_end(tls_last_offset, tls_last_size)) { 4826 tls_last_offset -= obj->tlssize; 4827 tls_last_size = 0; 4828 } 4829 } 4830 4831 void * 4832 _rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign) 4833 { 4834 void *ret; 4835 RtldLockState lockstate; 4836 4837 wlock_acquire(rtld_bind_lock, &lockstate); 4838 ret = allocate_tls(globallist_curr(TAILQ_FIRST(&obj_list)), oldtls, 4839 tcbsize, tcbalign); 4840 lock_release(rtld_bind_lock, &lockstate); 4841 return (ret); 4842 } 4843 4844 void 4845 _rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign) 4846 { 4847 RtldLockState lockstate; 4848 4849 wlock_acquire(rtld_bind_lock, &lockstate); 4850 free_tls(tcb, tcbsize, tcbalign); 4851 lock_release(rtld_bind_lock, &lockstate); 4852 } 4853 4854 static void 4855 object_add_name(Obj_Entry *obj, const char *name) 4856 { 4857 Name_Entry *entry; 4858 size_t len; 4859 4860 len = strlen(name); 4861 entry = malloc(sizeof(Name_Entry) + len); 4862 4863 if (entry != NULL) { 4864 strcpy(entry->name, name); 4865 STAILQ_INSERT_TAIL(&obj->names, entry, link); 4866 } 4867 } 4868 4869 static int 4870 object_match_name(const Obj_Entry *obj, const char *name) 4871 { 4872 Name_Entry *entry; 4873 4874 STAILQ_FOREACH(entry, &obj->names, link) { 4875 if (strcmp(name, entry->name) == 0) 4876 return (1); 4877 } 4878 return (0); 4879 } 4880 4881 static Obj_Entry * 4882 locate_dependency(const Obj_Entry *obj, const char *name) 4883 { 4884 const Objlist_Entry *entry; 4885 const Needed_Entry *needed; 4886 4887 STAILQ_FOREACH(entry, &list_main, link) { 4888 if (object_match_name(entry->obj, name)) 4889 return entry->obj; 4890 } 4891 4892 for (needed = obj->needed; needed != NULL; needed = needed->next) { 4893 if (strcmp(obj->strtab + needed->name, name) == 0 || 4894 (needed->obj != NULL && object_match_name(needed->obj, name))) { 4895 /* 4896 * If there is DT_NEEDED for the name we are looking for, 4897 * we are all set. Note that object might not be found if 4898 * dependency was not loaded yet, so the function can 4899 * return NULL here. This is expected and handled 4900 * properly by the caller. 
4901 */ 4902 return (needed->obj); 4903 } 4904 } 4905 _rtld_error("%s: Unexpected inconsistency: dependency %s not found", 4906 obj->path, name); 4907 rtld_die(); 4908 } 4909 4910 static int 4911 check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj, 4912 const Elf_Vernaux *vna) 4913 { 4914 const Elf_Verdef *vd; 4915 const char *vername; 4916 4917 vername = refobj->strtab + vna->vna_name; 4918 vd = depobj->verdef; 4919 if (vd == NULL) { 4920 _rtld_error("%s: version %s required by %s not defined", 4921 depobj->path, vername, refobj->path); 4922 return (-1); 4923 } 4924 for (;;) { 4925 if (vd->vd_version != VER_DEF_CURRENT) { 4926 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry", 4927 depobj->path, vd->vd_version); 4928 return (-1); 4929 } 4930 if (vna->vna_hash == vd->vd_hash) { 4931 const Elf_Verdaux *aux = (const Elf_Verdaux *) 4932 ((char *)vd + vd->vd_aux); 4933 if (strcmp(vername, depobj->strtab + aux->vda_name) == 0) 4934 return (0); 4935 } 4936 if (vd->vd_next == 0) 4937 break; 4938 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next); 4939 } 4940 if (vna->vna_flags & VER_FLG_WEAK) 4941 return (0); 4942 _rtld_error("%s: version %s required by %s not found", 4943 depobj->path, vername, refobj->path); 4944 return (-1); 4945 } 4946 4947 static int 4948 rtld_verify_object_versions(Obj_Entry *obj) 4949 { 4950 const Elf_Verneed *vn; 4951 const Elf_Verdef *vd; 4952 const Elf_Verdaux *vda; 4953 const Elf_Vernaux *vna; 4954 const Obj_Entry *depobj; 4955 int maxvernum, vernum; 4956 4957 if (obj->ver_checked) 4958 return (0); 4959 obj->ver_checked = true; 4960 4961 maxvernum = 0; 4962 /* 4963 * Walk over defined and required version records and figure out 4964 * max index used by any of them. Do very basic sanity checking 4965 * while there. 4966 */ 4967 vn = obj->verneed; 4968 while (vn != NULL) { 4969 if (vn->vn_version != VER_NEED_CURRENT) { 4970 _rtld_error("%s: Unsupported version %d of Elf_Verneed entry", 4971 obj->path, vn->vn_version); 4972 return (-1); 4973 } 4974 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux); 4975 for (;;) { 4976 vernum = VER_NEED_IDX(vna->vna_other); 4977 if (vernum > maxvernum) 4978 maxvernum = vernum; 4979 if (vna->vna_next == 0) 4980 break; 4981 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next); 4982 } 4983 if (vn->vn_next == 0) 4984 break; 4985 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next); 4986 } 4987 4988 vd = obj->verdef; 4989 while (vd != NULL) { 4990 if (vd->vd_version != VER_DEF_CURRENT) { 4991 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry", 4992 obj->path, vd->vd_version); 4993 return (-1); 4994 } 4995 vernum = VER_DEF_IDX(vd->vd_ndx); 4996 if (vernum > maxvernum) 4997 maxvernum = vernum; 4998 if (vd->vd_next == 0) 4999 break; 5000 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next); 5001 } 5002 5003 if (maxvernum == 0) 5004 return (0); 5005 5006 /* 5007 * Store version information in array indexable by version index. 5008 * Verify that object version requirements are satisfied along the 5009 * way. 
5010 */ 5011 obj->vernum = maxvernum + 1; 5012 obj->vertab = xcalloc(obj->vernum, sizeof(Ver_Entry)); 5013 5014 vd = obj->verdef; 5015 while (vd != NULL) { 5016 if ((vd->vd_flags & VER_FLG_BASE) == 0) { 5017 vernum = VER_DEF_IDX(vd->vd_ndx); 5018 assert(vernum <= maxvernum); 5019 vda = (const Elf_Verdaux *)((char *)vd + vd->vd_aux); 5020 obj->vertab[vernum].hash = vd->vd_hash; 5021 obj->vertab[vernum].name = obj->strtab + vda->vda_name; 5022 obj->vertab[vernum].file = NULL; 5023 obj->vertab[vernum].flags = 0; 5024 } 5025 if (vd->vd_next == 0) 5026 break; 5027 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next); 5028 } 5029 5030 vn = obj->verneed; 5031 while (vn != NULL) { 5032 depobj = locate_dependency(obj, obj->strtab + vn->vn_file); 5033 if (depobj == NULL) 5034 return (-1); 5035 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux); 5036 for (;;) { 5037 if (check_object_provided_version(obj, depobj, vna)) 5038 return (-1); 5039 vernum = VER_NEED_IDX(vna->vna_other); 5040 assert(vernum <= maxvernum); 5041 obj->vertab[vernum].hash = vna->vna_hash; 5042 obj->vertab[vernum].name = obj->strtab + vna->vna_name; 5043 obj->vertab[vernum].file = obj->strtab + vn->vn_file; 5044 obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ? 5045 VER_INFO_HIDDEN : 0; 5046 if (vna->vna_next == 0) 5047 break; 5048 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next); 5049 } 5050 if (vn->vn_next == 0) 5051 break; 5052 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next); 5053 } 5054 return 0; 5055 } 5056 5057 static int 5058 rtld_verify_versions(const Objlist *objlist) 5059 { 5060 Objlist_Entry *entry; 5061 int rc; 5062 5063 rc = 0; 5064 STAILQ_FOREACH(entry, objlist, link) { 5065 /* 5066 * Skip dummy objects or objects that have their version requirements 5067 * already checked. 5068 */ 5069 if (entry->obj->strtab == NULL || entry->obj->vertab != NULL) 5070 continue; 5071 if (rtld_verify_object_versions(entry->obj) == -1) { 5072 rc = -1; 5073 if (ld_tracing == NULL) 5074 break; 5075 } 5076 } 5077 if (rc == 0 || ld_tracing != NULL) 5078 rc = rtld_verify_object_versions(&obj_rtld); 5079 return rc; 5080 } 5081 5082 const Ver_Entry * 5083 fetch_ventry(const Obj_Entry *obj, unsigned long symnum) 5084 { 5085 Elf_Versym vernum; 5086 5087 if (obj->vertab) { 5088 vernum = VER_NDX(obj->versyms[symnum]); 5089 if (vernum >= obj->vernum) { 5090 _rtld_error("%s: symbol %s has wrong verneed value %d", 5091 obj->path, obj->strtab + symnum, vernum); 5092 } else if (obj->vertab[vernum].hash != 0) { 5093 return &obj->vertab[vernum]; 5094 } 5095 } 5096 return NULL; 5097 } 5098 5099 int 5100 _rtld_get_stack_prot(void) 5101 { 5102 5103 return (stack_prot); 5104 } 5105 5106 int 5107 _rtld_is_dlopened(void *arg) 5108 { 5109 Obj_Entry *obj; 5110 RtldLockState lockstate; 5111 int res; 5112 5113 rlock_acquire(rtld_bind_lock, &lockstate); 5114 obj = dlcheck(arg); 5115 if (obj == NULL) 5116 obj = obj_from_addr(arg); 5117 if (obj == NULL) { 5118 _rtld_error("No shared object contains address"); 5119 lock_release(rtld_bind_lock, &lockstate); 5120 return (-1); 5121 } 5122 res = obj->dlopened ? 
1 : 0; 5123 lock_release(rtld_bind_lock, &lockstate); 5124 return (res); 5125 } 5126 5127 int 5128 obj_enforce_relro(Obj_Entry *obj) 5129 { 5130 5131 if (obj->relro_size > 0 && mprotect(obj->relro_page, obj->relro_size, 5132 PROT_READ) == -1) { 5133 _rtld_error("%s: Cannot enforce relro protection: %s", 5134 obj->path, rtld_strerror(errno)); 5135 return (-1); 5136 } 5137 return (0); 5138 } 5139 5140 static void 5141 map_stacks_exec(RtldLockState *lockstate) 5142 { 5143 void (*thr_map_stacks_exec)(void); 5144 5145 if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0) 5146 return; 5147 thr_map_stacks_exec = (void (*)(void))(uintptr_t) 5148 get_program_var_addr("__pthread_map_stacks_exec", lockstate); 5149 if (thr_map_stacks_exec != NULL) { 5150 stack_prot |= PROT_EXEC; 5151 thr_map_stacks_exec(); 5152 } 5153 } 5154 5155 void 5156 symlook_init(SymLook *dst, const char *name) 5157 { 5158 5159 bzero(dst, sizeof(*dst)); 5160 dst->name = name; 5161 dst->hash = elf_hash(name); 5162 dst->hash_gnu = gnu_hash(name); 5163 } 5164 5165 static void 5166 symlook_init_from_req(SymLook *dst, const SymLook *src) 5167 { 5168 5169 dst->name = src->name; 5170 dst->hash = src->hash; 5171 dst->hash_gnu = src->hash_gnu; 5172 dst->ventry = src->ventry; 5173 dst->flags = src->flags; 5174 dst->defobj_out = NULL; 5175 dst->sym_out = NULL; 5176 dst->lockstate = src->lockstate; 5177 } 5178 5179 5180 /* 5181 * Parse a file descriptor number without pulling in more of libc (e.g. atoi). 5182 */ 5183 static int 5184 parse_libdir(const char *str) 5185 { 5186 static const int RADIX = 10; /* XXXJA: possibly support hex? */ 5187 const char *orig; 5188 int fd; 5189 char c; 5190 5191 orig = str; 5192 fd = 0; 5193 for (c = *str; c != '\0'; c = *++str) { 5194 if (c < '0' || c > '9') 5195 return (-1); 5196 5197 fd *= RADIX; 5198 fd += c - '0'; 5199 } 5200 5201 /* Make sure we actually parsed something. */ 5202 if (str == orig) { 5203 _rtld_error("failed to parse directory FD from '%s'", str); 5204 return (-1); 5205 } 5206 return (fd); 5207 } 5208 5209 /* 5210 * Overrides for libc_pic-provided functions. 5211 */ 5212 5213 int 5214 __getosreldate(void) 5215 { 5216 size_t len; 5217 int oid[2]; 5218 int error, osrel; 5219 5220 if (osreldate != 0) 5221 return (osreldate); 5222 5223 oid[0] = CTL_KERN; 5224 oid[1] = KERN_OSRELDATE; 5225 osrel = 0; 5226 len = sizeof(osrel); 5227 error = sysctl(oid, 2, &osrel, &len, NULL, 0); 5228 if (error == 0 && osrel > 0 && len == sizeof(osrel)) 5229 osreldate = osrel; 5230 return (osreldate); 5231 } 5232 5233 void 5234 exit(int status) 5235 { 5236 5237 _exit(status); 5238 } 5239 5240 void (*__cleanup)(void); 5241 int __isthreaded = 0; 5242 int _thread_autoinit_dummy_decl = 1; 5243 5244 /* 5245 * No unresolved symbols for rtld. 5246 */ 5247 void 5248 __pthread_cxa_finalize(struct dl_phdr_info *a) 5249 { 5250 } 5251 5252 void 5253 __stack_chk_fail(void) 5254 { 5255 5256 _rtld_error("stack overflow detected; terminated"); 5257 rtld_die(); 5258 } 5259 __weak_reference(__stack_chk_fail, __stack_chk_fail_local); 5260 5261 void 5262 __chk_fail(void) 5263 { 5264 5265 _rtld_error("buffer overflow detected; terminated"); 5266 rtld_die(); 5267 } 5268 5269 const char * 5270 rtld_strerror(int errnum) 5271 { 5272 5273 if (errnum < 0 || errnum >= sys_nerr) 5274 return ("Unknown error"); 5275 return (sys_errlist[errnum]); 5276 } 5277
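/*
 * parse_libdir() above parses the decimal directory descriptors that make
 * up the descriptor-based search path consumed by search_library_pathfds()
 * (LD_LIBRARY_PATH_FDS in FreeBSD's rtld).  That lets a program which will
 * enter capability mode hand rtld pre-opened library directories instead
 * of path names.  Launcher-style sketch below; illustrative only, and the
 * directory and target paths are placeholders.
 */
#if 0	/* illustrative example; never compiled into rtld */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	char buf[16];
	int fd;

	/* Pre-open a library directory (placeholder path). */
	fd = open("/usr/local/lib/myapp", O_RDONLY | O_DIRECTORY);
	if (fd == -1) {
		perror("open");
		return (1);
	}
	/* The variable holds a colon-separated list of directory fds. */
	snprintf(buf, sizeof(buf), "%d", fd);
	setenv("LD_LIBRARY_PATH_FDS", buf, 1);

	execl("/usr/local/bin/myapp", "myapp", (char *)NULL);	/* placeholder */
	perror("execl");
	return (1);
}
#endif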