/*-
 * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
 * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
 * Copyright 2009-2012 Konstantin Belousov <kib@FreeBSD.ORG>.
 * Copyright 2012 John Marino <draco@marino.st>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/ktrace.h>

#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"
#include "libmap.h"
#include "paths.h"
#include "rtld_tls.h"
#include "rtld_printf.h"
#include "rtld_utrace.h"
#include "notes.h"

/* Types. */
typedef void (*func_ptr_type)();
typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);

/*
 * Function declarations.
 */
static const char *basename(const char *);
static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
    const Elf_Dyn **, const Elf_Dyn **);
static void digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
    const Elf_Dyn *);
static void digest_dynamic(Obj_Entry *, int);
static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
static Obj_Entry *dlcheck(void *);
static int dlclose_locked(void *, RtldLockState *);
static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
    int lo_flags, int mode, RtldLockState *lockstate);
static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
static bool donelist_check(DoneList *, const Obj_Entry *);
static void errmsg_restore(char *);
static char *errmsg_save(void);
static void *fill_search_info(const char *, size_t, void *);
static char *find_library(const char *, const Obj_Entry *, int *);
static const char *gethints(bool);
static void hold_object(Obj_Entry *);
static void unhold_object(Obj_Entry *);
static void init_dag(Obj_Entry *);
static void init_marker(Obj_Entry *);
static void init_pagesizes(Elf_Auxinfo **aux_info);
static void init_rtld(caddr_t, Elf_Auxinfo **);
static void initlist_add_neededs(Needed_Entry *, Objlist *);
static void initlist_add_objects(Obj_Entry *, Obj_Entry *, Objlist *);
static void linkmap_add(Obj_Entry *);
static void linkmap_delete(Obj_Entry *);
static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
static void unload_filtees(Obj_Entry *, RtldLockState *);
static int load_needed_objects(Obj_Entry *, int);
static int load_preload_objects(void);
static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int);
static void map_stacks_exec(RtldLockState *);
static int obj_enforce_relro(Obj_Entry *);
static Obj_Entry *obj_from_addr(const void *);
static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
static void objlist_call_init(Objlist *, RtldLockState *);
static void objlist_clear(Objlist *);
static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
static void objlist_init(Objlist *);
static void objlist_push_head(Objlist *, Obj_Entry *);
static void objlist_push_tail(Objlist *, Obj_Entry *);
static void objlist_put_after(Objlist *, Obj_Entry *, Obj_Entry *);
static void objlist_remove(Objlist *, Obj_Entry *);
static int parse_libdir(const char *);
static void *path_enumerate(const char *, path_enum_proc, void *);
static void release_object(Obj_Entry *);
static int relocate_object_dag(Obj_Entry *root, bool bind_now,
    Obj_Entry *rtldobj, int flags, RtldLockState *lockstate);
static int relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
    int flags, RtldLockState *lockstate);
static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, int,
    RtldLockState *);
static int resolve_objects_ifunc(Obj_Entry *first, bool bind_now,
    int flags, RtldLockState *lockstate);
static int rtld_dirname(const char *, char *);
static int rtld_dirname_abs(const char *, char *);
static void *rtld_dlopen(const char *name, int fd, int mode);
static void rtld_exit(void);
static char *search_library_path(const char *, const char *);
static char *search_library_pathfds(const char *, const char *, int *);
static const void **get_program_var_addr(const char *, RtldLockState *);
static void set_program_var(const char *, const void *);
static int symlook_default(SymLook *, const Obj_Entry *refobj);
static int symlook_global(SymLook *, DoneList *);
static void symlook_init_from_req(SymLook *, const SymLook *);
static int symlook_list(SymLook *, const Objlist *, DoneList *);
static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
static int symlook_obj1_sysv(SymLook *, const Obj_Entry *);
static int symlook_obj1_gnu(SymLook *, const Obj_Entry *);
static void trace_loaded_objects(Obj_Entry *);
static void unlink_object(Obj_Entry *);
static void unload_object(Obj_Entry *, RtldLockState *lockstate);
static void unref_dag(Obj_Entry *);
static void ref_dag(Obj_Entry *);
static char *origin_subst_one(Obj_Entry *, char *, const char *,
    const char *, bool);
static char *origin_subst(Obj_Entry *, char *);
static bool obj_resolve_origin(Obj_Entry *obj);
static void preinit_main(void);
static int rtld_verify_versions(const Objlist *);
static int rtld_verify_object_versions(Obj_Entry *);
static void object_add_name(Obj_Entry *, const char *);
static int object_match_name(const Obj_Entry *, const char *);
static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
    struct dl_phdr_info *phdr_info);
static uint32_t gnu_hash(const char *);
static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *,
    const unsigned long);

void r_debug_state(struct r_debug *, struct link_map *) __noinline __exported;
void _r_debug_postinit(struct link_map *) __noinline __exported;

int __sys_openat(int, const char *, int, ...);

/*
 * Data declarations.
 */
static char *error_message;	/* Message for dlerror(), or NULL */
struct r_debug r_debug __exported;	/* for GDB; */
static bool libmap_disable;	/* Disable libmap */
static bool ld_loadfltr;	/* Immediate filters processing */
static char *libmap_override;	/* Maps to use in addition to libmap.conf */
static bool trust;		/* False for setuid and setgid programs */
static bool dangerous_ld_env;	/* True if environment variables have been
				   used to affect the libraries loaded */
bool ld_bind_not;		/* Disable PLT update */
static char *ld_bind_now;	/* Environment variable for immediate binding */
static char *ld_debug;		/* Environment variable for debugging */
static char *ld_library_path;	/* Environment variable for search path */
static char *ld_library_dirs;	/* Environment variable for library descriptors */
static char *ld_preload;	/* Environment variable for libraries to
				   load first */
static char *ld_elf_hints_path;	/* Environment variable for alternative hints path */
static char *ld_tracing;	/* Called from ldd to print libs */
static char *ld_utrace;		/* Use utrace() to log events. */
static struct obj_entry_q obj_list;	/* Queue of all loaded objects */
static Obj_Entry *obj_main;	/* The main program shared object */
static Obj_Entry obj_rtld;	/* The dynamic linker shared object */
static unsigned int obj_count;	/* Number of objects in obj_list */
static unsigned int obj_loads;	/* Number of loads of objects (gen count) */

static Objlist list_global =	/* Objects dlopened with RTLD_GLOBAL */
    STAILQ_HEAD_INITIALIZER(list_global);
static Objlist list_main =	/* Objects loaded at program startup */
    STAILQ_HEAD_INITIALIZER(list_main);
static Objlist list_fini =	/* Objects needing fini() calls */
    STAILQ_HEAD_INITIALIZER(list_fini);

Elf_Sym sym_zero;		/* For resolving undefined weak refs. */

#define GDB_STATE(s,m)	r_debug.r_state = s; r_debug_state(&r_debug,m);

extern Elf_Dyn _DYNAMIC;
#pragma weak _DYNAMIC

int dlclose(void *) __exported;
char *dlerror(void) __exported;
void *dlopen(const char *, int) __exported;
void *fdlopen(int, int) __exported;
void *dlsym(void *, const char *) __exported;
dlfunc_t dlfunc(void *, const char *) __exported;
void *dlvsym(void *, const char *, const char *) __exported;
int dladdr(const void *, Dl_info *) __exported;
void dllockinit(void *, void *(*)(void *), void (*)(void *), void (*)(void *),
    void (*)(void *), void (*)(void *), void (*)(void *)) __exported;
int dlinfo(void *, int, void *) __exported;
int dl_iterate_phdr(__dl_iterate_hdr_callback, void *) __exported;
int _rtld_addr_phdr(const void *, struct dl_phdr_info *) __exported;
int _rtld_get_stack_prot(void) __exported;
int _rtld_is_dlopened(void *) __exported;
void _rtld_error(const char *, ...) __exported;

int npagesizes, osreldate;
size_t *pagesizes;

long __stack_chk_guard[8] = {0, 0, 0, 0, 0, 0, 0, 0};

static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
static int max_stack_flags;

/*
 * Global declarations normally provided by crt1. The dynamic linker is
 * not built with crt1, so we have to provide them ourselves.
 */
char *__progname;
char **environ;

/*
 * Used to pass argc, argv to init functions.
 */
int main_argc;
char **main_argv;

/*
 * Globals to control TLS allocation.
 */
size_t tls_last_offset;		/* Static TLS offset of last module */
size_t tls_last_size;		/* Static TLS size of last module */
size_t tls_static_space;	/* Static TLS space allocated */
size_t tls_static_max_align;
int tls_dtv_generation = 1;	/* Used to detect when dtv size changes */
int tls_max_index = 1;		/* Largest module index allocated */

bool ld_library_path_rpath = false;

/*
 * Globals for path names, and such
 */
char *ld_elf_hints_default = _PATH_ELF_HINTS;
char *ld_path_libmap_conf = _PATH_LIBMAP_CONF;
char *ld_path_rtld = _PATH_RTLD;
char *ld_standard_library_path = STANDARD_LIBRARY_PATH;
char *ld_env_prefix = LD_;

/*
 * Fill in a DoneList with an allocation large enough to hold all of
 * the currently-loaded objects. Keep this as a macro since it calls
 * alloca and we want that to occur within the scope of the caller.
 */
#define donelist_init(dlp)					\
    ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]),	\
    assert((dlp)->objs != NULL),				\
    (dlp)->num_alloc = obj_count,				\
    (dlp)->num_used = 0)

#define LD_UTRACE(e, h, mb, ms, r, n) do {			\
	if (ld_utrace != NULL)					\
		ld_utrace_log(e, h, mb, ms, r, n);		\
} while (0)

static void
ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
    int refcnt, const char *name)
{
    struct utrace_rtld ut;
    static const char rtld_utrace_sig[RTLD_UTRACE_SIG_SZ] = RTLD_UTRACE_SIG;

    memcpy(ut.sig, rtld_utrace_sig, sizeof(ut.sig));
    ut.event = event;
    ut.handle = handle;
    ut.mapbase = mapbase;
    ut.mapsize = mapsize;
    ut.refcnt = refcnt;
    bzero(ut.name, sizeof(ut.name));
    if (name)
	strlcpy(ut.name, name, sizeof(ut.name));
    utrace(&ut, sizeof(ut));
}

#ifdef RTLD_VARIANT_ENV_NAMES
/*
 * construct the env variable based on the type of binary that's
 * running.
 */
static inline const char *
_LD(const char *var)
{
    static char buffer[128];

    strlcpy(buffer, ld_env_prefix, sizeof(buffer));
    strlcat(buffer, var, sizeof(buffer));
    return (buffer);
}
#else
#define _LD(x)	LD_ x
#endif

/*
 * Main entry point for dynamic linking. The first argument is the
 * stack pointer. The stack is expected to be laid out as described
 * in the SVR4 ABI specification, Intel 386 Processor Supplement.
 * Specifically, the stack pointer points to a word containing
 * ARGC. Following that in the stack is a null-terminated sequence
 * of pointers to argument strings. Then comes a null-terminated
 * sequence of pointers to environment strings. Finally, there is a
 * sequence of "auxiliary vector" entries.
 *
 * The second argument points to a place to store the dynamic linker's
 * exit procedure pointer and the third to a place to store the main
 * program's object.
 *
 * The return value is the main program's entry point.
 */
func_ptr_type
_rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
{
    Elf_Auxinfo *aux_info[AT_COUNT];
    int i;
    int argc;
    char **argv;
    char **env;
    Elf_Auxinfo *aux;
    Elf_Auxinfo *auxp;
    const char *argv0;
    Objlist_Entry *entry;
    Obj_Entry *obj;
    Obj_Entry *preload_tail;
    Obj_Entry *last_interposer;
    Objlist initlist;
    RtldLockState lockstate;
    char *library_path_rpath;
    int mib[2];
    size_t len;

    /*
     * On entry, the dynamic linker itself has not been relocated yet.
     * Be very careful not to reference any global data until after
     * init_rtld has returned. It is OK to reference file-scope statics
     * and string constants, and to call static and global functions.
     */

    /* Find the auxiliary vector on the stack. */
    argc = *sp++;
    argv = (char **) sp;
    sp += argc + 1;		/* Skip over arguments and NULL terminator */
    env = (char **) sp;
    while (*sp++ != 0)		/* Skip over environment, and NULL terminator */
	;
    aux = (Elf_Auxinfo *) sp;

    /* Digest the auxiliary vector. */
    for (i = 0; i < AT_COUNT; i++)
	aux_info[i] = NULL;
    for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
	if (auxp->a_type < AT_COUNT)
	    aux_info[auxp->a_type] = auxp;
    }
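    /*
     * As a concrete illustration of the layout described above, a
     * hypothetical invocation "prog -v" with a single environment entry
     * arrives with the stack pointer addressing, in order:
     *
     *		argc (2), "prog", "-v", NULL,
     *		"PATH=/bin", NULL,
     *		{AT_PHDR, ...} ... {AT_BASE, ...} ... {AT_NULL, 0}
     *
     * so after the loops above, argv and env point into that block and
     * aux_info[] holds one pointer per recognized AT_* entry.
     */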
    /* Initialize and relocate ourselves. */
    assert(aux_info[AT_BASE] != NULL);
    init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);

    __progname = obj_rtld.path;
    argv0 = argv[0] != NULL ? argv[0] : "(null)";
    environ = env;
    main_argc = argc;
    main_argv = argv;

    if (aux_info[AT_CANARY] != NULL &&
	aux_info[AT_CANARY]->a_un.a_ptr != NULL) {
	    i = aux_info[AT_CANARYLEN]->a_un.a_val;
	    if (i > sizeof(__stack_chk_guard))
		    i = sizeof(__stack_chk_guard);
	    memcpy(__stack_chk_guard, aux_info[AT_CANARY]->a_un.a_ptr, i);
    } else {
	mib[0] = CTL_KERN;
	mib[1] = KERN_ARND;

	len = sizeof(__stack_chk_guard);
	if (sysctl(mib, 2, __stack_chk_guard, &len, NULL, 0) == -1 ||
	    len != sizeof(__stack_chk_guard)) {
		/* If sysctl was unsuccessful, use the "terminator canary". */
		((unsigned char *)(void *)__stack_chk_guard)[0] = 0;
		((unsigned char *)(void *)__stack_chk_guard)[1] = 0;
		((unsigned char *)(void *)__stack_chk_guard)[2] = '\n';
		((unsigned char *)(void *)__stack_chk_guard)[3] = 255;
	}
    }

    trust = !issetugid();

    md_abi_variant_hook(aux_info);

    ld_bind_now = getenv(_LD("BIND_NOW"));

    /*
     * If the process is tainted, then we un-set the dangerous environment
     * variables. The process will be marked as tainted until setuid(2)
     * is called. If any child process calls setuid(2) we do not want any
     * future processes to honor the potentially un-safe variables.
     */
    if (!trust) {
	if (unsetenv(_LD("PRELOAD")) || unsetenv(_LD("LIBMAP")) ||
	    unsetenv(_LD("LIBRARY_PATH")) || unsetenv(_LD("LIBRARY_PATH_FDS")) ||
	    unsetenv(_LD("LIBMAP_DISABLE")) || unsetenv(_LD("BIND_NOT")) ||
	    unsetenv(_LD("DEBUG")) || unsetenv(_LD("ELF_HINTS_PATH")) ||
	    unsetenv(_LD("LOADFLTR")) || unsetenv(_LD("LIBRARY_PATH_RPATH"))) {
		_rtld_error("environment corrupt; aborting");
		rtld_die();
	}
    }
    ld_debug = getenv(_LD("DEBUG"));
    if (ld_bind_now == NULL)
	ld_bind_not = getenv(_LD("BIND_NOT")) != NULL;
    libmap_disable = getenv(_LD("LIBMAP_DISABLE")) != NULL;
    libmap_override = getenv(_LD("LIBMAP"));
    ld_library_path = getenv(_LD("LIBRARY_PATH"));
    ld_library_dirs = getenv(_LD("LIBRARY_PATH_FDS"));
    ld_preload = getenv(_LD("PRELOAD"));
    ld_elf_hints_path = getenv(_LD("ELF_HINTS_PATH"));
    ld_loadfltr = getenv(_LD("LOADFLTR")) != NULL;
    library_path_rpath = getenv(_LD("LIBRARY_PATH_RPATH"));
    if (library_path_rpath != NULL) {
	if (library_path_rpath[0] == 'y' ||
	    library_path_rpath[0] == 'Y' ||
	    library_path_rpath[0] == '1')
		ld_library_path_rpath = true;
	else
		ld_library_path_rpath = false;
    }
    dangerous_ld_env = libmap_disable || (libmap_override != NULL) ||
	(ld_library_path != NULL) || (ld_preload != NULL) ||
	(ld_elf_hints_path != NULL) || ld_loadfltr;
    ld_tracing = getenv(_LD("TRACE_LOADED_OBJECTS"));
    ld_utrace = getenv(_LD("UTRACE"));

    if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
	ld_elf_hints_path = ld_elf_hints_default;

    if (ld_debug != NULL && *ld_debug != '\0')
	debug = 1;
    dbg("%s is initialized, base address = %p", __progname,
	(caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
    dbg("RTLD dynamic = %p", obj_rtld.dynamic);
    dbg("RTLD pltgot  = %p", obj_rtld.pltgot);

    dbg("initializing thread locks");
    lockdflt_init();
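    /*
     * All of the knobs digested above come from LD_*-prefixed environment
     * variables (possibly renamed through _LD() for ABI variants).  A
     * hypothetical run that exercises several of them at once would be:
     *
     *		LD_BIND_NOW=1 LD_LIBRARY_PATH=/opt/lib LD_UTRACE=1 ./prog
     *
     * For tainted (setugid) processes the dangerous subset (LD_PRELOAD,
     * LD_LIBRARY_PATH, LD_LIBMAP, ...) was already unset above.
     */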
    /*
     * Load the main program, or process its program header if it is
     * already loaded.
     */
    if (aux_info[AT_EXECFD] != NULL) {	/* Load the main program. */
	int fd = aux_info[AT_EXECFD]->a_un.a_val;
	dbg("loading main program");
	obj_main = map_object(fd, argv0, NULL);
	close(fd);
	if (obj_main == NULL)
	    rtld_die();
	max_stack_flags = obj_main->stack_flags;
    } else {				/* Main program already loaded. */
	const Elf_Phdr *phdr;
	int phnum;
	caddr_t entry;

	dbg("processing main program's program header");
	assert(aux_info[AT_PHDR] != NULL);
	phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
	assert(aux_info[AT_PHNUM] != NULL);
	phnum = aux_info[AT_PHNUM]->a_un.a_val;
	assert(aux_info[AT_PHENT] != NULL);
	assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
	assert(aux_info[AT_ENTRY] != NULL);
	entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
	if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL)
	    rtld_die();
    }

    if (aux_info[AT_EXECPATH] != NULL) {
	char *kexecpath;
	char buf[MAXPATHLEN];

	kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
	dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
	if (kexecpath[0] == '/')
		obj_main->path = kexecpath;
	else if (getcwd(buf, sizeof(buf)) == NULL ||
		strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
		strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
		obj_main->path = xstrdup(argv0);
	else
		obj_main->path = xstrdup(buf);
    } else {
	dbg("No AT_EXECPATH");
	obj_main->path = xstrdup(argv0);
    }
    dbg("obj_main path %s", obj_main->path);
    obj_main->mainprog = true;

    if (aux_info[AT_STACKPROT] != NULL &&
	aux_info[AT_STACKPROT]->a_un.a_val != 0)
	    stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;

#ifndef COMPAT_32BIT
    /*
     * Get the actual dynamic linker pathname from the executable if
     * possible. (It should always be possible.) That ensures that
     * gdb will find the right dynamic linker even if a non-standard
     * one is being used.
     */
    if (obj_main->interp != NULL &&
	strcmp(obj_main->interp, obj_rtld.path) != 0) {
	    free(obj_rtld.path);
	    obj_rtld.path = xstrdup(obj_main->interp);
	    __progname = obj_rtld.path;
    }
#endif

    digest_dynamic(obj_main, 0);
    dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d",
	obj_main->path, obj_main->valid_hash_sysv, obj_main->valid_hash_gnu,
	obj_main->dynsymcount);

    linkmap_add(obj_main);
    linkmap_add(&obj_rtld);

    /* Link the main program into the list of objects. */
    TAILQ_INSERT_HEAD(&obj_list, obj_main, next);
    obj_count++;
    obj_loads++;

    /* Initialize a fake symbol for resolving undefined weak references. */
    sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
    sym_zero.st_shndx = SHN_UNDEF;
    sym_zero.st_value = -(uintptr_t)obj_main->relocbase;

    if (!libmap_disable)
	libmap_disable = (bool)lm_init(libmap_override);

    dbg("loading LD_PRELOAD libraries");
    if (load_preload_objects() == -1)
	rtld_die();
    preload_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));

    dbg("loading needed objects");
    if (load_needed_objects(obj_main, 0) == -1)
	rtld_die();
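    /*
     * Note that preloaded objects were placed in the list ahead of the
     * DT_NEEDED dependencies just resolved, which is what makes
     * environment-driven interposition work; e.g. (hypothetical library
     * name):
     *
     *		LD_PRELOAD=libinterpose.so.1 ./prog
     *
     * causes libinterpose.so.1 to be loaded by load_preload_objects()
     * above, so its definitions are found before those of later objects
     * in the default lookup order.
     */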
    /* Make a list of all objects loaded at startup. */
    last_interposer = obj_main;
    TAILQ_FOREACH(obj, &obj_list, next) {
	if (obj->marker)
	    continue;
	if (obj->z_interpose && obj != obj_main) {
	    objlist_put_after(&list_main, last_interposer, obj);
	    last_interposer = obj;
	} else {
	    objlist_push_tail(&list_main, obj);
	}
	obj->refcount++;
    }

    dbg("checking for required versions");
    if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
	rtld_die();

    if (ld_tracing) {		/* We're done */
	trace_loaded_objects(obj_main);
	exit(0);
    }

    if (getenv(_LD("DUMP_REL_PRE")) != NULL) {
	dump_relocations(obj_main);
	exit(0);
    }

    /*
     * Processing tls relocations requires having the tls offsets
     * initialized. Prepare offsets before starting initial
     * relocation processing.
     */
    dbg("initializing initial thread local storage offsets");
    STAILQ_FOREACH(entry, &list_main, link) {
	/*
	 * Allocate all the initial objects out of the static TLS
	 * block even if they didn't ask for it.
	 */
	allocate_tls_offset(entry->obj);
    }

    if (relocate_objects(obj_main,
	ld_bind_now != NULL && *ld_bind_now != '\0',
	&obj_rtld, SYMLOOK_EARLY, NULL) == -1)
	rtld_die();

    dbg("doing copy relocations");
    if (do_copy_relocations(obj_main) == -1)
	rtld_die();

    dbg("enforcing main obj relro");
    if (obj_enforce_relro(obj_main) == -1)
	rtld_die();

    if (getenv(_LD("DUMP_REL_POST")) != NULL) {
	dump_relocations(obj_main);
	exit(0);
    }

    /*
     * Setup TLS for main thread. This must be done after the
     * relocations are processed, since tls initialization section
     * might be the subject for relocations.
     */
    dbg("initializing initial thread local storage");
    allocate_initial_tls(globallist_curr(TAILQ_FIRST(&obj_list)));

    dbg("initializing key program variables");
    set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
    set_program_var("environ", env);
    set_program_var("__elf_aux_vector", aux);

    /* Make a list of init functions to call. */
    objlist_init(&initlist);
    initlist_add_objects(globallist_curr(TAILQ_FIRST(&obj_list)),
	preload_tail, &initlist);

    r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */

    map_stacks_exec(NULL);
    ifunc_init(aux);

    dbg("resolving ifuncs");
    if (resolve_objects_ifunc(obj_main,
	ld_bind_now != NULL && *ld_bind_now != '\0', SYMLOOK_EARLY,
	NULL) == -1)
	rtld_die();

    if (!obj_main->crt_no_init) {
	/*
	 * Make sure we don't call the main program's init and fini
	 * functions for binaries linked with old crt1 which calls
	 * _init itself.
	 */
	obj_main->init = obj_main->fini = (Elf_Addr)NULL;
	obj_main->preinit_array = obj_main->init_array =
	    obj_main->fini_array = (Elf_Addr)NULL;
    }

    wlock_acquire(rtld_bind_lock, &lockstate);
    if (obj_main->crt_no_init)
	preinit_main();
    objlist_call_init(&initlist, &lockstate);
    _r_debug_postinit(&obj_main->linkmap);
    objlist_clear(&initlist);
    dbg("loading filtees");
    TAILQ_FOREACH(obj, &obj_list, next) {
	if (obj->marker)
	    continue;
	if (ld_loadfltr || obj->z_loadfltr)
	    load_filtees(obj, 0, &lockstate);
    }
    lock_release(rtld_bind_lock, &lockstate);

    dbg("transferring control to program entry point = %p", obj_main->entry);
    /* Return the exit procedure and the program entry point. */
    *exit_proc = rtld_exit;
    *objp = obj_main;
    return (func_ptr_type) obj_main->entry;
}

void *
rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
{
    void *ptr;
    Elf_Addr target;

    ptr = (void *)make_function_pointer(def, obj);
    target = call_ifunc_resolver(ptr);
    return ((void *)target);
}

/*
 * NB: MIPS uses a private version of this function (_mips_rtld_bind).
 * Changes to this function should be applied there as well.
 */
Elf_Addr
_rtld_bind(Obj_Entry *obj, Elf_Size reloff)
{
    const Elf_Rel *rel;
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    Elf_Addr *where;
    Elf_Addr target;
    RtldLockState lockstate;

    rlock_acquire(rtld_bind_lock, &lockstate);
    if (sigsetjmp(lockstate.env, 0) != 0)
	    lock_upgrade(rtld_bind_lock, &lockstate);
    if (obj->pltrel)
	rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff);
    else
	rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff);

    where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, SYMLOOK_IN_PLT,
	NULL, &lockstate);
    if (def == NULL)
	rtld_die();
    if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
	target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
    else
	target = (Elf_Addr)(defobj->relocbase + def->st_value);

    dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
      defobj->strtab + def->st_name, basename(obj->path),
      (void *)target, basename(defobj->path));

    /*
     * Write the new contents for the jmpslot. Note that depending on
     * architecture, the value which we need to return back to the
     * lazy binding trampoline may or may not be the target
     * address. The value returned from reloc_jmpslot() is the value
     * that the trampoline needs.
     */
    target = reloc_jmpslot(where, target, defobj, obj, rel);
    lock_release(rtld_bind_lock, &lockstate);
    return target;
}

/*
 * Error reporting function. Use it like printf. It formats the message
 * into a buffer, and sets things up so that the next call to dlerror()
 * will return the message.
 */
void
_rtld_error(const char *fmt, ...)
{
    static char buf[512];
    va_list ap;

    va_start(ap, fmt);
    rtld_vsnprintf(buf, sizeof buf, fmt, ap);
    error_message = buf;
    va_end(ap);
    LD_UTRACE(UTRACE_RTLD_ERROR, NULL, NULL, 0, 0, error_message);
}

/*
 * Return a dynamically-allocated copy of the current error message, if any.
 */
static char *
errmsg_save(void)
{
    return error_message == NULL ? NULL : xstrdup(error_message);
}

/*
 * Restore the current error message from a copy which was previously saved
 * by errmsg_save(). The copy is freed.
 */
static void
errmsg_restore(char *saved_msg)
{
    if (saved_msg == NULL)
	error_message = NULL;
    else {
	_rtld_error("%s", saved_msg);
	free(saved_msg);
    }
}
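/*
 * For example, a failed lookup somewhere inside the linker typically
 * reports itself as
 *
 *	_rtld_error("Undefined symbol \"%s\"", name);
 *
 * and an application (hypothetical handle and symbol name) observes the
 * same message through the public API:
 *
 *	if (dlsym(handle, "missing_symbol") == NULL)
 *		warnx("%s", dlerror());
 */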
static const char *
basename(const char *name)
{
    const char *p = strrchr(name, '/');
    return p != NULL ? p + 1 : name;
}

static struct utsname uts;

static char *
origin_subst_one(Obj_Entry *obj, char *real, const char *kw,
    const char *subst, bool may_free)
{
    char *p, *p1, *res, *resp;
    int subst_len, kw_len, subst_count, old_len, new_len;

    kw_len = strlen(kw);

    /*
     * First, count the number of the keyword occurrences, to
     * preallocate the final string.
     */
    for (p = real, subst_count = 0;; p = p1 + kw_len, subst_count++) {
	p1 = strstr(p, kw);
	if (p1 == NULL)
	    break;
    }

    /*
     * If the keyword is not found, just return.
     *
     * Return non-substituted string if resolution failed. We
     * cannot do anything more reasonable, the failure mode of the
     * caller is unresolved library anyway.
     */
    if (subst_count == 0 || (obj != NULL && !obj_resolve_origin(obj)))
	return (may_free ? real : xstrdup(real));
    if (obj != NULL)
	subst = obj->origin_path;

    /*
     * There is indeed something to substitute. Calculate the
     * length of the resulting string, and allocate it.
     */
    subst_len = strlen(subst);
    old_len = strlen(real);
    new_len = old_len + (subst_len - kw_len) * subst_count;
    res = xmalloc(new_len + 1);

    /*
     * Now, execute the substitution loop.
     */
    for (p = real, resp = res, *resp = '\0';;) {
	p1 = strstr(p, kw);
	if (p1 != NULL) {
	    /* Copy the prefix before keyword. */
	    memcpy(resp, p, p1 - p);
	    resp += p1 - p;
	    /* Keyword replacement. */
	    memcpy(resp, subst, subst_len);
	    resp += subst_len;
	    *resp = '\0';
	    p = p1 + kw_len;
	} else
	    break;
    }

    /* Copy to the end of string and finish. */
    strcat(resp, p);
    if (may_free)
	free(real);
    return (res);
}

static char *
origin_subst(Obj_Entry *obj, char *real)
{
    char *res1, *res2, *res3, *res4;

    if (obj == NULL || !trust)
	return (xstrdup(real));
    if (uts.sysname[0] == '\0') {
	if (uname(&uts) != 0) {
	    _rtld_error("utsname failed: %d", errno);
	    return (NULL);
	}
    }
    res1 = origin_subst_one(obj, real, "$ORIGIN", NULL, false);
    res2 = origin_subst_one(NULL, res1, "$OSNAME", uts.sysname, true);
    res3 = origin_subst_one(NULL, res2, "$OSREL", uts.release, true);
    res4 = origin_subst_one(NULL, res3, "$PLATFORM", uts.machine, true);
    return (res4);
}

void
rtld_die(void)
{
    const char *msg = dlerror();

    if (msg == NULL)
	msg = "Fatal error";
    rtld_fdputstr(STDERR_FILENO, msg);
    rtld_fdputchar(STDERR_FILENO, '\n');
    _exit(1);
}
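/*
 * A worked example of the substitution above: for an object loaded from
 * the hypothetical path /usr/local/app/bin/prog, a DT_RUNPATH of
 * "$ORIGIN/../lib:$ORIGIN" expands to
 * "/usr/local/app/bin/../lib:/usr/local/app/bin", while "$OSNAME",
 * "$OSREL" and "$PLATFORM" are filled from uname(3).  For tainted
 * (setugid) processes the expansion is skipped entirely, per the !trust
 * check in origin_subst().
 */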
/*
 * Process a shared object's DYNAMIC section, and save the important
 * information in its Obj_Entry structure.
 */
static void
digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
    const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath)
{
    const Elf_Dyn *dynp;
    Needed_Entry **needed_tail = &obj->needed;
    Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
    Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
    const Elf_Hashelt *hashtab;
    const Elf32_Word *hashval;
    Elf32_Word bkt, nmaskwords;
    int bloom_size32;
    int plttype = DT_REL;

    *dyn_rpath = NULL;
    *dyn_soname = NULL;
    *dyn_runpath = NULL;

    obj->bind_now = false;
    for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
	switch (dynp->d_tag) {

	case DT_REL:
	    obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_RELSZ:
	    obj->relsize = dynp->d_un.d_val;
	    break;

	case DT_RELENT:
	    assert(dynp->d_un.d_val == sizeof(Elf_Rel));
	    break;

	case DT_JMPREL:
	    obj->pltrel = (const Elf_Rel *)
	      (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_PLTRELSZ:
	    obj->pltrelsize = dynp->d_un.d_val;
	    break;

	case DT_RELA:
	    obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_RELASZ:
	    obj->relasize = dynp->d_un.d_val;
	    break;

	case DT_RELAENT:
	    assert(dynp->d_un.d_val == sizeof(Elf_Rela));
	    break;

	case DT_PLTREL:
	    plttype = dynp->d_un.d_val;
	    assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
	    break;

	case DT_SYMTAB:
	    obj->symtab = (const Elf_Sym *)
	      (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_SYMENT:
	    assert(dynp->d_un.d_val == sizeof(Elf_Sym));
	    break;

	case DT_STRTAB:
	    obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_STRSZ:
	    obj->strsize = dynp->d_un.d_val;
	    break;

	case DT_VERNEED:
	    obj->verneed = (const Elf_Verneed *) (obj->relocbase +
		dynp->d_un.d_val);
	    break;

	case DT_VERNEEDNUM:
	    obj->verneednum = dynp->d_un.d_val;
	    break;

	case DT_VERDEF:
	    obj->verdef = (const Elf_Verdef *) (obj->relocbase +
		dynp->d_un.d_val);
	    break;

	case DT_VERDEFNUM:
	    obj->verdefnum = dynp->d_un.d_val;
	    break;

	case DT_VERSYM:
	    obj->versyms = (const Elf_Versym *)(obj->relocbase +
		dynp->d_un.d_val);
	    break;

	case DT_HASH:
	    {
		hashtab = (const Elf_Hashelt *)(obj->relocbase +
		    dynp->d_un.d_ptr);
		obj->nbuckets = hashtab[0];
		obj->nchains = hashtab[1];
		obj->buckets = hashtab + 2;
		obj->chains = obj->buckets + obj->nbuckets;
		obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 &&
		  obj->buckets != NULL;
	    }
	    break;

	case DT_GNU_HASH:
	    {
		hashtab = (const Elf_Hashelt *)(obj->relocbase +
		    dynp->d_un.d_ptr);
		obj->nbuckets_gnu = hashtab[0];
		obj->symndx_gnu = hashtab[1];
		nmaskwords = hashtab[2];
		bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords;
		obj->maskwords_bm_gnu = nmaskwords - 1;
		obj->shift2_gnu = hashtab[3];
		obj->bloom_gnu = (Elf_Addr *) (hashtab + 4);
		obj->buckets_gnu = hashtab + 4 + bloom_size32;
		obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu -
		  obj->symndx_gnu;
		/* Number of bitmask words is required to be power of 2 */
		obj->valid_hash_gnu = powerof2(nmaskwords) &&
		    obj->nbuckets_gnu > 0 && obj->buckets_gnu != NULL;
	    }
	    break;
	case DT_NEEDED:
	    if (!obj->rtld) {
		Needed_Entry *nep = NEW(Needed_Entry);
		nep->name = dynp->d_un.d_val;
		nep->obj = NULL;
		nep->next = NULL;

		*needed_tail = nep;
		needed_tail = &nep->next;
	    }
	    break;

	case DT_FILTER:
	    if (!obj->rtld) {
		Needed_Entry *nep = NEW(Needed_Entry);
		nep->name = dynp->d_un.d_val;
		nep->obj = NULL;
		nep->next = NULL;

		*needed_filtees_tail = nep;
		needed_filtees_tail = &nep->next;
	    }
	    break;

	case DT_AUXILIARY:
	    if (!obj->rtld) {
		Needed_Entry *nep = NEW(Needed_Entry);
		nep->name = dynp->d_un.d_val;
		nep->obj = NULL;
		nep->next = NULL;

		*needed_aux_filtees_tail = nep;
		needed_aux_filtees_tail = &nep->next;
	    }
	    break;

	case DT_PLTGOT:
	    obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_TEXTREL:
	    obj->textrel = true;
	    break;

	case DT_SYMBOLIC:
	    obj->symbolic = true;
	    break;

	case DT_RPATH:
	    /*
	     * We have to wait until later to process this, because we
	     * might not have gotten the address of the string table yet.
	     */
	    *dyn_rpath = dynp;
	    break;

	case DT_SONAME:
	    *dyn_soname = dynp;
	    break;

	case DT_RUNPATH:
	    *dyn_runpath = dynp;
	    break;

	case DT_INIT:
	    obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_PREINIT_ARRAY:
	    obj->preinit_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_PREINIT_ARRAYSZ:
	    obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
	    break;

	case DT_INIT_ARRAY:
	    obj->init_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_INIT_ARRAYSZ:
	    obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
	    break;

	case DT_FINI:
	    obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_FINI_ARRAY:
	    obj->fini_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_FINI_ARRAYSZ:
	    obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
	    break;
	/*
	 * Don't process DT_DEBUG on MIPS as the dynamic section
	 * is mapped read-only. DT_MIPS_RLD_MAP is used instead.
	 */

#ifndef __mips__
	case DT_DEBUG:
	    if (!early)
		dbg("Filling in DT_DEBUG entry");
	    ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
	    break;
#endif

	case DT_FLAGS:
		if (dynp->d_un.d_val & DF_ORIGIN)
		    obj->z_origin = true;
		if (dynp->d_un.d_val & DF_SYMBOLIC)
		    obj->symbolic = true;
		if (dynp->d_un.d_val & DF_TEXTREL)
		    obj->textrel = true;
		if (dynp->d_un.d_val & DF_BIND_NOW)
		    obj->bind_now = true;
		/*if (dynp->d_un.d_val & DF_STATIC_TLS)
		    ;*/
	    break;
#ifdef __mips__
	case DT_MIPS_LOCAL_GOTNO:
		obj->local_gotno = dynp->d_un.d_val;
		break;

	case DT_MIPS_SYMTABNO:
		obj->symtabno = dynp->d_un.d_val;
		break;

	case DT_MIPS_GOTSYM:
		obj->gotsym = dynp->d_un.d_val;
		break;

	case DT_MIPS_RLD_MAP:
		*((Elf_Addr *)(dynp->d_un.d_ptr)) = (Elf_Addr) &r_debug;
		break;
#endif

#ifdef __powerpc64__
	case DT_PPC64_GLINK:
		obj->glink = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
		break;
#endif

	case DT_FLAGS_1:
		if (dynp->d_un.d_val & DF_1_NOOPEN)
		    obj->z_noopen = true;
		if (dynp->d_un.d_val & DF_1_ORIGIN)
		    obj->z_origin = true;
		if (dynp->d_un.d_val & DF_1_GLOBAL)
		    obj->z_global = true;
		if (dynp->d_un.d_val & DF_1_BIND_NOW)
		    obj->bind_now = true;
		if (dynp->d_un.d_val & DF_1_NODELETE)
		    obj->z_nodelete = true;
		if (dynp->d_un.d_val & DF_1_LOADFLTR)
		    obj->z_loadfltr = true;
		if (dynp->d_un.d_val & DF_1_INTERPOSE)
		    obj->z_interpose = true;
		if (dynp->d_un.d_val & DF_1_NODEFLIB)
		    obj->z_nodeflib = true;
	    break;

	default:
	    if (!early) {
		dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
		    (long)dynp->d_tag);
	    }
	    break;
	}
    }

    obj->traced = false;

    if (plttype == DT_RELA) {
	obj->pltrela = (const Elf_Rela *) obj->pltrel;
	obj->pltrel = NULL;
	obj->pltrelasize = obj->pltrelsize;
	obj->pltrelsize = 0;
    }

    /* Determine size of dynsym table (equal to nchains of sysv hash) */
    if (obj->valid_hash_sysv)
	obj->dynsymcount = obj->nchains;
    else if (obj->valid_hash_gnu) {
	obj->dynsymcount = 0;
	for (bkt = 0; bkt < obj->nbuckets_gnu; bkt++) {
	    if (obj->buckets_gnu[bkt] == 0)
		continue;
	    hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]];
	    do
		obj->dynsymcount++;
	    while ((*hashval++ & 1u) == 0);
	}
	obj->dynsymcount += obj->symndx_gnu;
    }
}

static bool
obj_resolve_origin(Obj_Entry *obj)
{

    if (obj->origin_path != NULL)
	return (true);
    obj->origin_path = xmalloc(PATH_MAX);
    return (rtld_dirname_abs(obj->path, obj->origin_path) != -1);
}

static void
digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
    const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath)
{

    if (obj->z_origin && !obj_resolve_origin(obj))
	rtld_die();

    if (dyn_runpath != NULL) {
	obj->runpath = (char *)obj->strtab + dyn_runpath->d_un.d_val;
	obj->runpath = origin_subst(obj, obj->runpath);
    } else if (dyn_rpath != NULL) {
	obj->rpath = (char *)obj->strtab + dyn_rpath->d_un.d_val;
	obj->rpath = origin_subst(obj, obj->rpath);
    }
    if (dyn_soname != NULL)
	object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
}
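/*
 * Note on the dynsymcount computation in digest_dynamic1() above: a GNU
 * hash chain ends at the first entry whose low bit is set, so the
 * do/while loop walks each non-empty bucket's chain to its terminator
 * and then adds symndx_gnu for the unhashed (local) symbols that precede
 * the hashed ones.  For example, a hypothetical object with
 * symndx_gnu == 3 and two chains of lengths 4 and 2 ends up with
 * dynsymcount == 9.
 */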
static void
digest_dynamic(Obj_Entry *obj, int early)
{
    const Elf_Dyn *dyn_rpath;
    const Elf_Dyn *dyn_soname;
    const Elf_Dyn *dyn_runpath;

    digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath);
    digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath);
}

/*
 * Process a shared object's program header. This is used only for the
 * main program, when the kernel has already loaded the main program
 * into memory before calling the dynamic linker. It creates and
 * returns an Obj_Entry structure.
 */
static Obj_Entry *
digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
{
    Obj_Entry *obj;
    const Elf_Phdr *phlimit = phdr + phnum;
    const Elf_Phdr *ph;
    Elf_Addr note_start, note_end;
    int nsegs = 0;

    obj = obj_new();
    for (ph = phdr; ph < phlimit; ph++) {
	if (ph->p_type != PT_PHDR)
	    continue;

	obj->phdr = phdr;
	obj->phsize = ph->p_memsz;
	obj->relocbase = (caddr_t)phdr - ph->p_vaddr;
	break;
    }

    obj->stack_flags = PF_X | PF_R | PF_W;

    for (ph = phdr; ph < phlimit; ph++) {
	switch (ph->p_type) {

	case PT_INTERP:
	    obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
	    break;

	case PT_LOAD:
	    if (nsegs == 0) {	/* First load segment */
		obj->vaddrbase = trunc_page(ph->p_vaddr);
		obj->mapbase = obj->vaddrbase + obj->relocbase;
		obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
		  obj->vaddrbase;
	    } else {		/* Last load segment */
		obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
		  obj->vaddrbase;
	    }
	    nsegs++;
	    break;

	case PT_DYNAMIC:
	    obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
	    break;

	case PT_TLS:
	    obj->tlsindex = 1;
	    obj->tlssize = ph->p_memsz;
	    obj->tlsalign = ph->p_align;
	    obj->tlsinitsize = ph->p_filesz;
	    obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
	    break;

	case PT_GNU_STACK:
	    obj->stack_flags = ph->p_flags;
	    break;

	case PT_GNU_RELRO:
	    obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
	    obj->relro_size = round_page(ph->p_memsz);
	    break;

	case PT_NOTE:
	    note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
	    note_end = note_start + ph->p_filesz;
	    digest_notes(obj, note_start, note_end);
	    break;
	}
    }
    if (nsegs < 1) {
	_rtld_error("%s: too few PT_LOAD segments", path);
	return NULL;
    }

    obj->entry = entry;
    return obj;
}
void
digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end)
{
    const Elf_Note *note;
    const char *note_name;
    uintptr_t p;

    for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end;
	note = (const Elf_Note *)((const char *)(note + 1) +
	  roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
	  roundup2(note->n_descsz, sizeof(Elf32_Addr)))) {
	if (note->n_namesz != sizeof(NOTE_FREEBSD_VENDOR) ||
	    note->n_descsz != sizeof(int32_t))
		continue;
	if (note->n_type != NT_FREEBSD_ABI_TAG &&
	    note->n_type != NT_FREEBSD_NOINIT_TAG)
		continue;
	note_name = (const char *)(note + 1);
	if (strncmp(NOTE_FREEBSD_VENDOR, note_name,
	    sizeof(NOTE_FREEBSD_VENDOR)) != 0)
		continue;
	switch (note->n_type) {
	case NT_FREEBSD_ABI_TAG:
	    /* FreeBSD osrel note */
	    p = (uintptr_t)(note + 1);
	    p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
	    obj->osrel = *(const int32_t *)(p);
	    dbg("note osrel %d", obj->osrel);
	    break;
	case NT_FREEBSD_NOINIT_TAG:
	    /* FreeBSD 'crt does not call init' note */
	    obj->crt_no_init = true;
	    dbg("note crt_no_init");
	    break;
	}
    }
}

static Obj_Entry *
dlcheck(void *handle)
{
    Obj_Entry *obj;

    TAILQ_FOREACH(obj, &obj_list, next) {
	if (obj == (Obj_Entry *) handle)
	    break;
    }

    if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
	_rtld_error("Invalid shared object handle %p", handle);
	return NULL;
    }
    return obj;
}

/*
 * If the given object is already in the donelist, return true. Otherwise
 * add the object to the list and return false.
 */
static bool
donelist_check(DoneList *dlp, const Obj_Entry *obj)
{
    unsigned int i;

    for (i = 0; i < dlp->num_used; i++)
	if (dlp->objs[i] == obj)
	    return true;
    /*
     * Our donelist allocation should always be sufficient. But if
     * our thread locking isn't working properly, more shared objects
     * could have been loaded since we allocated the list. That should
     * never happen, but we'll handle it properly just in case it does.
     */
    if (dlp->num_used < dlp->num_alloc)
	dlp->objs[dlp->num_used++] = obj;
    return false;
}

/*
 * Hash function for symbol table lookup. Don't even think about changing
 * this. It is specified by the System V ABI.
 */
unsigned long
elf_hash(const char *name)
{
    const unsigned char *p = (const unsigned char *) name;
    unsigned long h = 0;
    unsigned long g;

    while (*p != '\0') {
	h = (h << 4) + *p++;
	if ((g = h & 0xf0000000) != 0)
	    h ^= g >> 24;
	h &= ~g;
    }
    return h;
}

/*
 * The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits
 * unsigned in case it's implemented with a wider type.
 */
static uint32_t
gnu_hash(const char *s)
{
    uint32_t h;
    unsigned char c;

    h = 5381;
    for (c = *s; c != '\0'; c = *++s)
	h = h * 33 + c;
    return (h & 0xffffffff);
}
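/*
 * For reference, both hash functions are easy to check by hand; e.g.
 * for the empty string elf_hash("") == 0 and gnu_hash("") == 5381,
 * and gnu_hash("a") == 5381 * 33 + 'a' == 177670.
 */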
/*
 * Find the library with the given name, and return its full pathname.
 * The returned string is dynamically allocated. Generates an error
 * message and returns NULL if the library cannot be found.
 *
 * If the second argument is non-NULL, then it refers to an already-
 * loaded shared object, whose library search path will be searched.
 *
 * If a library is successfully located via LD_LIBRARY_PATH_FDS, its
 * descriptor (which is close-on-exec) will be passed out via the third
 * argument.
 *
 * The search order is:
 *   DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1)
 *   DT_RPATH of the main object if DSO without defined DT_RUNPATH (1)
 *   LD_LIBRARY_PATH
 *   DT_RUNPATH in the referencing file
 *   ldconfig hints (if -z nodefaultlib, filter out default library directories
 *	from list)
 *   /lib:/usr/lib _unless_ the referencing file is linked with -z nodefaultlib
 *
 * (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined.
 */
static char *
find_library(const char *xname, const Obj_Entry *refobj, int *fdp)
{
    char *pathname;
    char *name;
    bool nodeflib, objgiven;

    objgiven = refobj != NULL;
    if (strchr(xname, '/') != NULL) {	/* Hard coded pathname */
	if (xname[0] != '/' && !trust) {
	    _rtld_error("Absolute pathname required for shared object \"%s\"",
	      xname);
	    return NULL;
	}
	return (origin_subst(__DECONST(Obj_Entry *, refobj),
	  __DECONST(char *, xname)));
    }

    if (libmap_disable || !objgiven ||
	(name = lm_find(refobj->path, xname)) == NULL)
	name = (char *)xname;

    dbg(" Searching for \"%s\"", name);

    /*
     * If refobj->rpath != NULL, then refobj->runpath is NULL. Fall
     * back to pre-conforming behaviour if user requested so with
     * LD_LIBRARY_PATH_RPATH environment variable and ignore -z
     * nodeflib.
     */
    if (objgiven && refobj->rpath != NULL && ld_library_path_rpath) {
	if ((pathname = search_library_path(name, ld_library_path)) != NULL ||
	  (refobj != NULL &&
	  (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
	  (pathname = search_library_pathfds(name, ld_library_dirs, fdp)) != NULL ||
	  (pathname = search_library_path(name, gethints(false))) != NULL ||
	  (pathname = search_library_path(name, ld_standard_library_path)) != NULL)
	    return (pathname);
    } else {
	nodeflib = objgiven ? refobj->z_nodeflib : false;
	if ((objgiven &&
	  (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
	  (objgiven && refobj->runpath == NULL && refobj != obj_main &&
	  (pathname = search_library_path(name, obj_main->rpath)) != NULL) ||
	  (pathname = search_library_path(name, ld_library_path)) != NULL ||
	  (objgiven &&
	  (pathname = search_library_path(name, refobj->runpath)) != NULL) ||
	  (pathname = search_library_pathfds(name, ld_library_dirs, fdp)) != NULL ||
	  (pathname = search_library_path(name, gethints(nodeflib))) != NULL ||
	  (objgiven && !nodeflib &&
	  (pathname = search_library_path(name, ld_standard_library_path)) != NULL))
	    return (pathname);
    }

    if (objgiven && refobj->path != NULL) {
	_rtld_error("Shared object \"%s\" not found, required by \"%s\"",
	  name, basename(refobj->path));
    } else {
	_rtld_error("Shared object \"%s\" not found", name);
    }
    return NULL;
}
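/*
 * A concrete (hypothetical) walk through the order above: if an object
 * with no DT_RUNPATH and a DT_RPATH of "$ORIGIN/../lib" requests
 * "libfoo.so.1", find_library() tries the expanded rpath first, then
 * each directory in LD_LIBRARY_PATH, then the LD_LIBRARY_PATH_FDS
 * descriptors, then the directories recorded in the ldconfig hints
 * file, and finally the built-in default path, returning the first
 * candidate pathname that actually exists.
 */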
/*
 * Given a symbol number in a referencing object, find the corresponding
 * definition of the symbol. Returns a pointer to the symbol, or NULL if
 * no definition was found. Returns a pointer to the Obj_Entry of the
 * defining object via the reference parameter DEFOBJ_OUT.
 */
const Elf_Sym *
find_symdef(unsigned long symnum, const Obj_Entry *refobj,
    const Obj_Entry **defobj_out, int flags, SymCache *cache,
    RtldLockState *lockstate)
{
    const Elf_Sym *ref;
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    SymLook req;
    const char *name;
    int res;

    /*
     * If we have already found this symbol, get the information from
     * the cache.
     */
    if (symnum >= refobj->dynsymcount)
	return NULL;	/* Bad object */
    if (cache != NULL && cache[symnum].sym != NULL) {
	*defobj_out = cache[symnum].obj;
	return cache[symnum].sym;
    }

    ref = refobj->symtab + symnum;
    name = refobj->strtab + ref->st_name;
    def = NULL;
    defobj = NULL;

    /*
     * We don't have to do a full scale lookup if the symbol is local.
     * We know it will bind to the instance in this load module; to
     * which we already have a pointer (ie ref). By not doing a lookup,
     * we not only improve performance, but it also avoids unresolvable
     * symbols when local symbols are not in the hash table. This has
     * been seen with the ia64 toolchain.
     */
    if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
	if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
	    _rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
		symnum);
	}
	symlook_init(&req, name);
	req.flags = flags;
	req.ventry = fetch_ventry(refobj, symnum);
	req.lockstate = lockstate;
	res = symlook_default(&req, refobj);
	if (res == 0) {
	    def = req.sym_out;
	    defobj = req.defobj_out;
	}
    } else {
	def = ref;
	defobj = refobj;
    }

    /*
     * If we found no definition and the reference is weak, treat the
     * symbol as having the value zero.
     */
    if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
	def = &sym_zero;
	defobj = obj_main;
    }

    if (def != NULL) {
	*defobj_out = defobj;
	/* Record the information in the cache to avoid subsequent lookups. */
	if (cache != NULL) {
	    cache[symnum].sym = def;
	    cache[symnum].obj = defobj;
	}
    } else {
	if (refobj != &obj_rtld)
	    _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name);
    }
    return def;
}

/*
 * Return the search path from the ldconfig hints file, reading it if
 * necessary. If nostdlib is true, then the default search paths are
 * not added to result.
 *
 * Returns NULL if there are problems with the hints file,
 * or if the search path there is empty.
 */
static const char *
gethints(bool nostdlib)
{
    static char *hints, *filtered_path;
    static struct elfhints_hdr hdr;
    struct fill_search_info_args sargs, hargs;
    struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo;
    struct dl_serpath *SLPpath, *hintpath;
    char *p;
    struct stat hint_stat;
    unsigned int SLPndx, hintndx, fndx, fcount;
    int fd;
    size_t flen;
    uint32_t dl;
    bool skip;

    /* First call, read the hints file */
    if (hints == NULL) {
	/* Keep from trying again in case the hints file is bad. */
	hints = "";

	if ((fd = open(ld_elf_hints_path, O_RDONLY | O_CLOEXEC)) == -1)
	    return (NULL);

	/*
	 * The check of the hdr.dirlistlen value against the type limit
	 * is intended to pacify static analyzers. Further paranoia
	 * leads to checks that dirlist is fully contained in the file
	 * range.
	 */
	if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
	    hdr.magic != ELFHINTS_MAGIC ||
	    hdr.version != 1 || hdr.dirlistlen > UINT_MAX / 2 ||
	    fstat(fd, &hint_stat) == -1) {
cleanup1:
	    close(fd);
	    hdr.dirlistlen = 0;
	    return (NULL);
	}
	dl = hdr.strtab;
	if (dl + hdr.dirlist < dl)
	    goto cleanup1;
	dl += hdr.dirlist;
	if (dl + hdr.dirlistlen < dl)
	    goto cleanup1;
	dl += hdr.dirlistlen;
	if (dl > hint_stat.st_size)
	    goto cleanup1;
	p = xmalloc(hdr.dirlistlen + 1);

	if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 ||
	    read(fd, p, hdr.dirlistlen + 1) !=
	    (ssize_t)hdr.dirlistlen + 1 || p[hdr.dirlistlen] != '\0') {
	    free(p);
	    goto cleanup1;
	}
	hints = p;
	close(fd);
    }

    /*
     * If the caller agreed to receive the list which includes the default
     * paths, we are done. Otherwise, if we have not yet calculated the
     * filtered result, do it now.
     */
    if (!nostdlib)
	return (hints[0] != '\0' ? hints : NULL);
    if (filtered_path != NULL)
	goto filt_ret;

    /*
     * Obtain the list of all configured search paths, and the
     * list of the default paths.
     *
     * First estimate the size of the results.
     */
    smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
    smeta.dls_cnt = 0;
    hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
    hmeta.dls_cnt = 0;

    sargs.request = RTLD_DI_SERINFOSIZE;
    sargs.serinfo = &smeta;
    hargs.request = RTLD_DI_SERINFOSIZE;
    hargs.serinfo = &hmeta;

    path_enumerate(ld_standard_library_path, fill_search_info, &sargs);
    path_enumerate(hints, fill_search_info, &hargs);

    SLPinfo = xmalloc(smeta.dls_size);
    hintinfo = xmalloc(hmeta.dls_size);

    /*
     * Next fetch both sets of paths.
     */
    sargs.request = RTLD_DI_SERINFO;
    sargs.serinfo = SLPinfo;
    sargs.serpath = &SLPinfo->dls_serpath[0];
    sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt];

    hargs.request = RTLD_DI_SERINFO;
    hargs.serinfo = hintinfo;
    hargs.serpath = &hintinfo->dls_serpath[0];
    hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt];

    path_enumerate(ld_standard_library_path, fill_search_info, &sargs);
    path_enumerate(hints, fill_search_info, &hargs);

    /*
     * Now calculate the difference between the two sets, by excluding
     * the standard paths from the full set.
     */
    fndx = 0;
    fcount = 0;
    filtered_path = xmalloc(hdr.dirlistlen + 1);
    hintpath = &hintinfo->dls_serpath[0];
    for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++, hintpath++) {
	skip = false;
	SLPpath = &SLPinfo->dls_serpath[0];
	/*
	 * Check each standard path against the current one.
	 */
	for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++, SLPpath++) {
	    /* matched, skip the path */
	    if (!strcmp(hintpath->dls_name, SLPpath->dls_name)) {
		skip = true;
		break;
	    }
	}
	if (skip)
	    continue;
	/*
	 * Not matched against any standard path, add the path
	 * to the result. Separate consecutive paths with ':'.
	 */
1793 */ 1794 if (fcount > 0) { 1795 filtered_path[fndx] = ':'; 1796 fndx++; 1797 } 1798 fcount++; 1799 flen = strlen(hintpath->dls_name); 1800 strncpy((filtered_path + fndx), hintpath->dls_name, flen); 1801 fndx += flen; 1802 } 1803 filtered_path[fndx] = '\0'; 1804 1805 free(SLPinfo); 1806 free(hintinfo); 1807 1808 filt_ret: 1809 return (filtered_path[0] != '\0' ? filtered_path : NULL); 1810 } 1811 1812 static void 1813 init_dag(Obj_Entry *root) 1814 { 1815 const Needed_Entry *needed; 1816 const Objlist_Entry *elm; 1817 DoneList donelist; 1818 1819 if (root->dag_inited) 1820 return; 1821 donelist_init(&donelist); 1822 1823 /* Root object belongs to own DAG. */ 1824 objlist_push_tail(&root->dldags, root); 1825 objlist_push_tail(&root->dagmembers, root); 1826 donelist_check(&donelist, root); 1827 1828 /* 1829 * Add dependencies of root object to DAG in breadth order 1830 * by exploiting the fact that each new object get added 1831 * to the tail of the dagmembers list. 1832 */ 1833 STAILQ_FOREACH(elm, &root->dagmembers, link) { 1834 for (needed = elm->obj->needed; needed != NULL; needed = needed->next) { 1835 if (needed->obj == NULL || donelist_check(&donelist, needed->obj)) 1836 continue; 1837 objlist_push_tail(&needed->obj->dldags, root); 1838 objlist_push_tail(&root->dagmembers, needed->obj); 1839 } 1840 } 1841 root->dag_inited = true; 1842 } 1843 1844 static void 1845 init_marker(Obj_Entry *marker) 1846 { 1847 1848 bzero(marker, sizeof(*marker)); 1849 marker->marker = true; 1850 } 1851 1852 Obj_Entry * 1853 globallist_curr(const Obj_Entry *obj) 1854 { 1855 1856 for (;;) { 1857 if (obj == NULL) 1858 return (NULL); 1859 if (!obj->marker) 1860 return (__DECONST(Obj_Entry *, obj)); 1861 obj = TAILQ_PREV(obj, obj_entry_q, next); 1862 } 1863 } 1864 1865 Obj_Entry * 1866 globallist_next(const Obj_Entry *obj) 1867 { 1868 1869 for (;;) { 1870 obj = TAILQ_NEXT(obj, next); 1871 if (obj == NULL) 1872 return (NULL); 1873 if (!obj->marker) 1874 return (__DECONST(Obj_Entry *, obj)); 1875 } 1876 } 1877 1878 /* Prevent the object from being unmapped while the bind lock is dropped. */ 1879 static void 1880 hold_object(Obj_Entry *obj) 1881 { 1882 1883 obj->holdcount++; 1884 } 1885 1886 static void 1887 unhold_object(Obj_Entry *obj) 1888 { 1889 1890 assert(obj->holdcount > 0); 1891 if (--obj->holdcount == 0 && obj->unholdfree) 1892 release_object(obj); 1893 } 1894 1895 static void 1896 process_z(Obj_Entry *root) 1897 { 1898 const Objlist_Entry *elm; 1899 Obj_Entry *obj; 1900 1901 /* 1902 * Walk over object DAG and process every dependent object 1903 * that is marked as DF_1_NODELETE or DF_1_GLOBAL. They need 1904 * to grow their own DAG. 1905 * 1906 * For DF_1_GLOBAL, DAG is required for symbol lookups in 1907 * symlook_global() to work. 1908 * 1909 * For DF_1_NODELETE, the DAG should have its reference upped. 1910 */ 1911 STAILQ_FOREACH(elm, &root->dagmembers, link) { 1912 obj = elm->obj; 1913 if (obj == NULL) 1914 continue; 1915 if (obj->z_nodelete && !obj->ref_nodel) { 1916 dbg("obj %s -z nodelete", obj->path); 1917 init_dag(obj); 1918 ref_dag(obj); 1919 obj->ref_nodel = true; 1920 } 1921 if (obj->z_global && objlist_find(&list_global, obj) == NULL) { 1922 dbg("obj %s -z global", obj->path); 1923 objlist_push_tail(&list_global, obj); 1924 init_dag(obj); 1925 } 1926 } 1927 } 1928 /* 1929 * Initialize the dynamic linker. The argument is the address at which 1930 * the dynamic linker has been mapped into memory. The primary task of 1931 * this function is to relocate the dynamic linker. 
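 *
 * Until that self-relocation is complete, rtld cannot rely on its own
 * global data or string constants, which is why the temporary Obj_Entry
 * below is built on the stack and only copied into obj_rtld after
 * relocate_objects() has run.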
1932 */ 1933 static void 1934 init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info) 1935 { 1936 Obj_Entry objtmp; /* Temporary rtld object */ 1937 const Elf_Ehdr *ehdr; 1938 const Elf_Dyn *dyn_rpath; 1939 const Elf_Dyn *dyn_soname; 1940 const Elf_Dyn *dyn_runpath; 1941 1942 #ifdef RTLD_INIT_PAGESIZES_EARLY 1943 /* The page size is required by the dynamic memory allocator. */ 1944 init_pagesizes(aux_info); 1945 #endif 1946 1947 /* 1948 * Conjure up an Obj_Entry structure for the dynamic linker. 1949 * 1950 * The "path" member can't be initialized yet because string constants 1951 * cannot yet be accessed. Below we will set it correctly. 1952 */ 1953 memset(&objtmp, 0, sizeof(objtmp)); 1954 objtmp.path = NULL; 1955 objtmp.rtld = true; 1956 objtmp.mapbase = mapbase; 1957 #ifdef PIC 1958 objtmp.relocbase = mapbase; 1959 #endif 1960 1961 objtmp.dynamic = rtld_dynamic(&objtmp); 1962 digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath); 1963 assert(objtmp.needed == NULL); 1964 #if !defined(__mips__) 1965 /* MIPS has a bogus DT_TEXTREL. */ 1966 assert(!objtmp.textrel); 1967 #endif 1968 /* 1969 * Temporarily put the dynamic linker entry into the object list, so 1970 * that symbols can be found. 1971 */ 1972 relocate_objects(&objtmp, true, &objtmp, 0, NULL); 1973 1974 ehdr = (Elf_Ehdr *)mapbase; 1975 objtmp.phdr = (Elf_Phdr *)((char *)mapbase + ehdr->e_phoff); 1976 objtmp.phsize = ehdr->e_phnum * sizeof(objtmp.phdr[0]); 1977 1978 /* Initialize the object list. */ 1979 TAILQ_INIT(&obj_list); 1980 1981 /* Now that non-local variables can be accesses, copy out obj_rtld. */ 1982 memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld)); 1983 1984 #ifndef RTLD_INIT_PAGESIZES_EARLY 1985 /* The page size is required by the dynamic memory allocator. */ 1986 init_pagesizes(aux_info); 1987 #endif 1988 1989 if (aux_info[AT_OSRELDATE] != NULL) 1990 osreldate = aux_info[AT_OSRELDATE]->a_un.a_val; 1991 1992 digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath); 1993 1994 /* Replace the path with a dynamically allocated copy. */ 1995 obj_rtld.path = xstrdup(ld_path_rtld); 1996 1997 r_debug.r_brk = r_debug_state; 1998 r_debug.r_state = RT_CONSISTENT; 1999 } 2000 2001 /* 2002 * Retrieve the array of supported page sizes. The kernel provides the page 2003 * sizes in increasing order. 2004 */ 2005 static void 2006 init_pagesizes(Elf_Auxinfo **aux_info) 2007 { 2008 static size_t psa[MAXPAGESIZES]; 2009 int mib[2]; 2010 size_t len, size; 2011 2012 if (aux_info[AT_PAGESIZES] != NULL && aux_info[AT_PAGESIZESLEN] != 2013 NULL) { 2014 size = aux_info[AT_PAGESIZESLEN]->a_un.a_val; 2015 pagesizes = aux_info[AT_PAGESIZES]->a_un.a_ptr; 2016 } else { 2017 len = 2; 2018 if (sysctlnametomib("hw.pagesizes", mib, &len) == 0) 2019 size = sizeof(psa); 2020 else { 2021 /* As a fallback, retrieve the base page size. */ 2022 size = sizeof(psa[0]); 2023 if (aux_info[AT_PAGESZ] != NULL) { 2024 psa[0] = aux_info[AT_PAGESZ]->a_un.a_val; 2025 goto psa_filled; 2026 } else { 2027 mib[0] = CTL_HW; 2028 mib[1] = HW_PAGESIZE; 2029 len = 2; 2030 } 2031 } 2032 if (sysctl(mib, len, psa, &size, NULL, 0) == -1) { 2033 _rtld_error("sysctl for hw.pagesize(s) failed"); 2034 rtld_die(); 2035 } 2036 psa_filled: 2037 pagesizes = psa; 2038 } 2039 npagesizes = size / sizeof(pagesizes[0]); 2040 /* Discard any invalid entries at the end of the array. 
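 * Trailing slots can legitimately be zero, for instance when the kernel
 * exports a fixed-size array with unused entries left at zero, so trim
 * them off rather than treating them as page sizes.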
*/ 2041 while (npagesizes > 0 && pagesizes[npagesizes - 1] == 0) 2042 npagesizes--; 2043 } 2044 2045 /* 2046 * Add the init functions from a needed object list (and its recursive 2047 * needed objects) to "list". This is not used directly; it is a helper 2048 * function for initlist_add_objects(). The write lock must be held 2049 * when this function is called. 2050 */ 2051 static void 2052 initlist_add_neededs(Needed_Entry *needed, Objlist *list) 2053 { 2054 /* Recursively process the successor needed objects. */ 2055 if (needed->next != NULL) 2056 initlist_add_neededs(needed->next, list); 2057 2058 /* Process the current needed object. */ 2059 if (needed->obj != NULL) 2060 initlist_add_objects(needed->obj, needed->obj, list); 2061 } 2062 2063 /* 2064 * Scan all of the DAGs rooted in the range of objects from "obj" to 2065 * "tail" and add their init functions to "list". This recurses over 2066 * the DAGs and ensure the proper init ordering such that each object's 2067 * needed libraries are initialized before the object itself. At the 2068 * same time, this function adds the objects to the global finalization 2069 * list "list_fini" in the opposite order. The write lock must be 2070 * held when this function is called. 2071 */ 2072 static void 2073 initlist_add_objects(Obj_Entry *obj, Obj_Entry *tail, Objlist *list) 2074 { 2075 Obj_Entry *nobj; 2076 2077 if (obj->init_scanned || obj->init_done) 2078 return; 2079 obj->init_scanned = true; 2080 2081 /* Recursively process the successor objects. */ 2082 nobj = globallist_next(obj); 2083 if (nobj != NULL && obj != tail) 2084 initlist_add_objects(nobj, tail, list); 2085 2086 /* Recursively process the needed objects. */ 2087 if (obj->needed != NULL) 2088 initlist_add_neededs(obj->needed, list); 2089 if (obj->needed_filtees != NULL) 2090 initlist_add_neededs(obj->needed_filtees, list); 2091 if (obj->needed_aux_filtees != NULL) 2092 initlist_add_neededs(obj->needed_aux_filtees, list); 2093 2094 /* Add the object to the init list. */ 2095 if (obj->preinit_array != (Elf_Addr)NULL || obj->init != (Elf_Addr)NULL || 2096 obj->init_array != (Elf_Addr)NULL) 2097 objlist_push_tail(list, obj); 2098 2099 /* Add the object to the global fini list in the reverse order. 
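 * Pushing onto the head of list_fini yields the reverse of the init
 * order, so an object's fini functions run before those of the
 * libraries it depends on.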
*/ 2100 if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL) 2101 && !obj->on_fini_list) { 2102 objlist_push_head(&list_fini, obj); 2103 obj->on_fini_list = true; 2104 } 2105 } 2106 2107 #ifndef FPTR_TARGET 2108 #define FPTR_TARGET(f) ((Elf_Addr) (f)) 2109 #endif 2110 2111 static void 2112 free_needed_filtees(Needed_Entry *n, RtldLockState *lockstate) 2113 { 2114 Needed_Entry *needed, *needed1; 2115 2116 for (needed = n; needed != NULL; needed = needed->next) { 2117 if (needed->obj != NULL) { 2118 dlclose_locked(needed->obj, lockstate); 2119 needed->obj = NULL; 2120 } 2121 } 2122 for (needed = n; needed != NULL; needed = needed1) { 2123 needed1 = needed->next; 2124 free(needed); 2125 } 2126 } 2127 2128 static void 2129 unload_filtees(Obj_Entry *obj, RtldLockState *lockstate) 2130 { 2131 2132 free_needed_filtees(obj->needed_filtees, lockstate); 2133 obj->needed_filtees = NULL; 2134 free_needed_filtees(obj->needed_aux_filtees, lockstate); 2135 obj->needed_aux_filtees = NULL; 2136 obj->filtees_loaded = false; 2137 } 2138 2139 static void 2140 load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags, 2141 RtldLockState *lockstate) 2142 { 2143 2144 for (; needed != NULL; needed = needed->next) { 2145 needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj, 2146 flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) | 2147 RTLD_LOCAL, lockstate); 2148 } 2149 } 2150 2151 static void 2152 load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate) 2153 { 2154 2155 lock_restart_for_upgrade(lockstate); 2156 if (!obj->filtees_loaded) { 2157 load_filtee1(obj, obj->needed_filtees, flags, lockstate); 2158 load_filtee1(obj, obj->needed_aux_filtees, flags, lockstate); 2159 obj->filtees_loaded = true; 2160 } 2161 } 2162 2163 static int 2164 process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags) 2165 { 2166 Obj_Entry *obj1; 2167 2168 for (; needed != NULL; needed = needed->next) { 2169 obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj, 2170 flags & ~RTLD_LO_NOLOAD); 2171 if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0) 2172 return (-1); 2173 } 2174 return (0); 2175 } 2176 2177 /* 2178 * Given a shared object, traverse its list of needed objects, and load 2179 * each of them. Returns 0 on success. Generates an error message and 2180 * returns -1 on failure. 2181 */ 2182 static int 2183 load_needed_objects(Obj_Entry *first, int flags) 2184 { 2185 Obj_Entry *obj; 2186 2187 for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) { 2188 if (obj->marker) 2189 continue; 2190 if (process_needed(obj, obj->needed, flags) == -1) 2191 return (-1); 2192 } 2193 return (0); 2194 } 2195 2196 static int 2197 load_preload_objects(void) 2198 { 2199 char *p = ld_preload; 2200 Obj_Entry *obj; 2201 static const char delim[] = " \t:;"; 2202 2203 if (p == NULL) 2204 return 0; 2205 2206 p += strspn(p, delim); 2207 while (*p != '\0') { 2208 size_t len = strcspn(p, delim); 2209 char savech; 2210 2211 savech = p[len]; 2212 p[len] = '\0'; 2213 obj = load_object(p, -1, NULL, 0); 2214 if (obj == NULL) 2215 return -1; /* XXX - cleanup */ 2216 obj->z_interpose = true; 2217 p[len] = savech; 2218 p += len; 2219 p += strspn(p, delim); 2220 } 2221 LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL); 2222 return 0; 2223 } 2224 2225 static const char * 2226 printable_path(const char *path) 2227 { 2228 2229 return (path == NULL ? "<unknown>" : path); 2230 } 2231 2232 /* 2233 * Load a shared object into memory, if it is not already loaded. 
The 2234 * object may be specified by name or by user-supplied file descriptor 2235 * fd_u. In the latter case, the fd_u descriptor is not closed, but its 2236 * duplicate is. 2237 * 2238 * Returns a pointer to the Obj_Entry for the object. Returns NULL 2239 * on failure. 2240 */ 2241 static Obj_Entry * 2242 load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags) 2243 { 2244 Obj_Entry *obj; 2245 int fd; 2246 struct stat sb; 2247 char *path; 2248 2249 fd = -1; 2250 if (name != NULL) { 2251 TAILQ_FOREACH(obj, &obj_list, next) { 2252 if (obj->marker || obj->doomed) 2253 continue; 2254 if (object_match_name(obj, name)) 2255 return (obj); 2256 } 2257 2258 path = find_library(name, refobj, &fd); 2259 if (path == NULL) 2260 return (NULL); 2261 } else 2262 path = NULL; 2263 2264 if (fd >= 0) { 2265 /* 2266 * search_library_pathfds() opens a fresh file descriptor for the 2267 * library, so there is no need to dup(). 2268 */ 2269 } else if (fd_u == -1) { 2270 /* 2271 * If we didn't find a match by pathname, or the name is not 2272 * supplied, open the file and check again by device and inode. 2273 * This avoids false mismatches caused by multiple links or ".." 2274 * in pathnames. 2275 * 2276 * To avoid a race, we open the file and use fstat() rather than 2277 * using stat(). 2278 */ 2279 if ((fd = open(path, O_RDONLY | O_CLOEXEC | O_VERIFY)) == -1) { 2280 _rtld_error("Cannot open \"%s\"", path); 2281 free(path); 2282 return (NULL); 2283 } 2284 } else { 2285 fd = fcntl(fd_u, F_DUPFD_CLOEXEC, 0); 2286 if (fd == -1) { 2287 _rtld_error("Cannot dup fd"); 2288 free(path); 2289 return (NULL); 2290 } 2291 } 2292 if (fstat(fd, &sb) == -1) { 2293 _rtld_error("Cannot fstat \"%s\"", printable_path(path)); 2294 close(fd); 2295 free(path); 2296 return NULL; 2297 } 2298 TAILQ_FOREACH(obj, &obj_list, next) { 2299 if (obj->marker || obj->doomed) 2300 continue; 2301 if (obj->ino == sb.st_ino && obj->dev == sb.st_dev) 2302 break; 2303 } 2304 if (obj != NULL && name != NULL) { 2305 object_add_name(obj, name); 2306 free(path); 2307 close(fd); 2308 return obj; 2309 } 2310 if (flags & RTLD_LO_NOLOAD) { 2311 free(path); 2312 close(fd); 2313 return (NULL); 2314 } 2315 2316 /* First use of this object, so we must map it in. */ 2317 obj = do_load_object(fd, name, path, &sb, flags); 2318 if (obj == NULL) 2319 free(path); 2320 close(fd); 2321 2322 return obj; 2323 } 2324 2325 static Obj_Entry * 2326 do_load_object(int fd, const char *name, char *path, struct stat *sbp, 2327 int flags) 2328 { 2329 Obj_Entry *obj; 2330 struct statfs fs; 2331 2332 /* 2333 * Before mapping the object, make sure that environment variables 2334 * haven't been used to circumvent the noexec flag on a filesystem. 2335 */ 2336 if (dangerous_ld_env) { 2337 if (fstatfs(fd, &fs) != 0) { 2338 _rtld_error("Cannot fstatfs \"%s\"", printable_path(path)); 2339 return NULL; 2340 } 2341 if (fs.f_flags & MNT_NOEXEC) { 2342 _rtld_error("Cannot execute objects on %s\n", fs.f_mntonname); 2343 return NULL; 2344 } 2345 } 2346 dbg("loading \"%s\"", printable_path(path)); 2347 obj = map_object(fd, printable_path(path), sbp); 2348 if (obj == NULL) 2349 return NULL; 2350 2351 /* 2352 * If DT_SONAME is present in the object, digest_dynamic2 already 2353 * added it to the object names.
2354 */ 2355 if (name != NULL) 2356 object_add_name(obj, name); 2357 obj->path = path; 2358 digest_dynamic(obj, 0); 2359 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", obj->path, 2360 obj->valid_hash_sysv, obj->valid_hash_gnu, obj->dynsymcount); 2361 if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) == 2362 RTLD_LO_DLOPEN) { 2363 dbg("refusing to load non-loadable \"%s\"", obj->path); 2364 _rtld_error("Cannot dlopen non-loadable %s", obj->path); 2365 munmap(obj->mapbase, obj->mapsize); 2366 obj_free(obj); 2367 return (NULL); 2368 } 2369 2370 obj->dlopened = (flags & RTLD_LO_DLOPEN) != 0; 2371 TAILQ_INSERT_TAIL(&obj_list, obj, next); 2372 obj_count++; 2373 obj_loads++; 2374 linkmap_add(obj); /* for GDB & dlinfo() */ 2375 max_stack_flags |= obj->stack_flags; 2376 2377 dbg(" %p .. %p: %s", obj->mapbase, 2378 obj->mapbase + obj->mapsize - 1, obj->path); 2379 if (obj->textrel) 2380 dbg(" WARNING: %s has impure text", obj->path); 2381 LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0, 2382 obj->path); 2383 2384 return obj; 2385 } 2386 2387 static Obj_Entry * 2388 obj_from_addr(const void *addr) 2389 { 2390 Obj_Entry *obj; 2391 2392 TAILQ_FOREACH(obj, &obj_list, next) { 2393 if (obj->marker) 2394 continue; 2395 if (addr < (void *) obj->mapbase) 2396 continue; 2397 if (addr < (void *) (obj->mapbase + obj->mapsize)) 2398 return obj; 2399 } 2400 return NULL; 2401 } 2402 2403 static void 2404 preinit_main(void) 2405 { 2406 Elf_Addr *preinit_addr; 2407 int index; 2408 2409 preinit_addr = (Elf_Addr *)obj_main->preinit_array; 2410 if (preinit_addr == NULL) 2411 return; 2412 2413 for (index = 0; index < obj_main->preinit_array_num; index++) { 2414 if (preinit_addr[index] != 0 && preinit_addr[index] != 1) { 2415 dbg("calling preinit function for %s at %p", obj_main->path, 2416 (void *)preinit_addr[index]); 2417 LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index], 2418 0, 0, obj_main->path); 2419 call_init_pointer(obj_main, preinit_addr[index]); 2420 } 2421 } 2422 } 2423 2424 /* 2425 * Call the finalization functions for each of the objects in "list" 2426 * belonging to the DAG of "root" and referenced once. If NULL "root" 2427 * is specified, every finalization function will be called regardless 2428 * of the reference count and the list elements won't be freed. All of 2429 * the objects are expected to have non-NULL fini functions. 2430 */ 2431 static void 2432 objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate) 2433 { 2434 Objlist_Entry *elm; 2435 char *saved_msg; 2436 Elf_Addr *fini_addr; 2437 int index; 2438 2439 assert(root == NULL || root->refcount == 1); 2440 2441 if (root != NULL) 2442 root->doomed = true; 2443 2444 /* 2445 * Preserve the current error message since a fini function might 2446 * call into the dynamic linker and overwrite it. 2447 */ 2448 saved_msg = errmsg_save(); 2449 do { 2450 STAILQ_FOREACH(elm, list, link) { 2451 if (root != NULL && (elm->obj->refcount != 1 || 2452 objlist_find(&root->dagmembers, elm->obj) == NULL)) 2453 continue; 2454 /* Remove object from fini list to prevent recursive invocation. */ 2455 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link); 2456 /* Ensure that new references cannot be acquired. */ 2457 elm->obj->doomed = true; 2458 2459 hold_object(elm->obj); 2460 lock_release(rtld_bind_lock, lockstate); 2461 /* 2462 * It is legal to have both DT_FINI and DT_FINI_ARRAY defined. 2463 * When this happens, DT_FINI_ARRAY is processed first. 
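 * The DT_FINI_ARRAY entries themselves are invoked in reverse index
 * order below, and entries whose value is 0 or 1 are treated as
 * placeholders and skipped.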
2464 */ 2465 fini_addr = (Elf_Addr *)elm->obj->fini_array; 2466 if (fini_addr != NULL && elm->obj->fini_array_num > 0) { 2467 for (index = elm->obj->fini_array_num - 1; index >= 0; 2468 index--) { 2469 if (fini_addr[index] != 0 && fini_addr[index] != 1) { 2470 dbg("calling fini function for %s at %p", 2471 elm->obj->path, (void *)fini_addr[index]); 2472 LD_UTRACE(UTRACE_FINI_CALL, elm->obj, 2473 (void *)fini_addr[index], 0, 0, elm->obj->path); 2474 call_initfini_pointer(elm->obj, fini_addr[index]); 2475 } 2476 } 2477 } 2478 if (elm->obj->fini != (Elf_Addr)NULL) { 2479 dbg("calling fini function for %s at %p", elm->obj->path, 2480 (void *)elm->obj->fini); 2481 LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini, 2482 0, 0, elm->obj->path); 2483 call_initfini_pointer(elm->obj, elm->obj->fini); 2484 } 2485 wlock_acquire(rtld_bind_lock, lockstate); 2486 unhold_object(elm->obj); 2487 /* No need to free anything if process is going down. */ 2488 if (root != NULL) 2489 free(elm); 2490 /* 2491 * We must restart the list traversal after every fini call 2492 * because a dlclose() call from the fini function or from 2493 * another thread might have modified the reference counts. 2494 */ 2495 break; 2496 } 2497 } while (elm != NULL); 2498 errmsg_restore(saved_msg); 2499 } 2500 2501 /* 2502 * Call the initialization functions for each of the objects in 2503 * "list". All of the objects are expected to have non-NULL init 2504 * functions. 2505 */ 2506 static void 2507 objlist_call_init(Objlist *list, RtldLockState *lockstate) 2508 { 2509 Objlist_Entry *elm; 2510 Obj_Entry *obj; 2511 char *saved_msg; 2512 Elf_Addr *init_addr; 2513 int index; 2514 2515 /* 2516 * Clean init_scanned flag so that objects can be rechecked and 2517 * possibly initialized earlier if any of vectors called below 2518 * cause the change by using dlopen. 2519 */ 2520 TAILQ_FOREACH(obj, &obj_list, next) { 2521 if (obj->marker) 2522 continue; 2523 obj->init_scanned = false; 2524 } 2525 2526 /* 2527 * Preserve the current error message since an init function might 2528 * call into the dynamic linker and overwrite it. 2529 */ 2530 saved_msg = errmsg_save(); 2531 STAILQ_FOREACH(elm, list, link) { 2532 if (elm->obj->init_done) /* Initialized early. */ 2533 continue; 2534 /* 2535 * Race: other thread might try to use this object before current 2536 * one completes the initialization. Not much can be done here 2537 * without better locking. 2538 */ 2539 elm->obj->init_done = true; 2540 hold_object(elm->obj); 2541 lock_release(rtld_bind_lock, lockstate); 2542 2543 /* 2544 * It is legal to have both DT_INIT and DT_INIT_ARRAY defined. 2545 * When this happens, DT_INIT is processed first. 
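 * The DT_INIT_ARRAY entries are then invoked in increasing index order;
 * as with the fini arrays, entries whose value is 0 or 1 are treated as
 * placeholders and skipped.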
2546 */ 2547 if (elm->obj->init != (Elf_Addr)NULL) { 2548 dbg("calling init function for %s at %p", elm->obj->path, 2549 (void *)elm->obj->init); 2550 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init, 2551 0, 0, elm->obj->path); 2552 call_initfini_pointer(elm->obj, elm->obj->init); 2553 } 2554 init_addr = (Elf_Addr *)elm->obj->init_array; 2555 if (init_addr != NULL) { 2556 for (index = 0; index < elm->obj->init_array_num; index++) { 2557 if (init_addr[index] != 0 && init_addr[index] != 1) { 2558 dbg("calling init function for %s at %p", elm->obj->path, 2559 (void *)init_addr[index]); 2560 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, 2561 (void *)init_addr[index], 0, 0, elm->obj->path); 2562 call_init_pointer(elm->obj, init_addr[index]); 2563 } 2564 } 2565 } 2566 wlock_acquire(rtld_bind_lock, lockstate); 2567 unhold_object(elm->obj); 2568 } 2569 errmsg_restore(saved_msg); 2570 } 2571 2572 static void 2573 objlist_clear(Objlist *list) 2574 { 2575 Objlist_Entry *elm; 2576 2577 while (!STAILQ_EMPTY(list)) { 2578 elm = STAILQ_FIRST(list); 2579 STAILQ_REMOVE_HEAD(list, link); 2580 free(elm); 2581 } 2582 } 2583 2584 static Objlist_Entry * 2585 objlist_find(Objlist *list, const Obj_Entry *obj) 2586 { 2587 Objlist_Entry *elm; 2588 2589 STAILQ_FOREACH(elm, list, link) 2590 if (elm->obj == obj) 2591 return elm; 2592 return NULL; 2593 } 2594 2595 static void 2596 objlist_init(Objlist *list) 2597 { 2598 STAILQ_INIT(list); 2599 } 2600 2601 static void 2602 objlist_push_head(Objlist *list, Obj_Entry *obj) 2603 { 2604 Objlist_Entry *elm; 2605 2606 elm = NEW(Objlist_Entry); 2607 elm->obj = obj; 2608 STAILQ_INSERT_HEAD(list, elm, link); 2609 } 2610 2611 static void 2612 objlist_push_tail(Objlist *list, Obj_Entry *obj) 2613 { 2614 Objlist_Entry *elm; 2615 2616 elm = NEW(Objlist_Entry); 2617 elm->obj = obj; 2618 STAILQ_INSERT_TAIL(list, elm, link); 2619 } 2620 2621 static void 2622 objlist_put_after(Objlist *list, Obj_Entry *listobj, Obj_Entry *obj) 2623 { 2624 Objlist_Entry *elm, *listelm; 2625 2626 STAILQ_FOREACH(listelm, list, link) { 2627 if (listelm->obj == listobj) 2628 break; 2629 } 2630 elm = NEW(Objlist_Entry); 2631 elm->obj = obj; 2632 if (listelm != NULL) 2633 STAILQ_INSERT_AFTER(list, listelm, elm, link); 2634 else 2635 STAILQ_INSERT_TAIL(list, elm, link); 2636 } 2637 2638 static void 2639 objlist_remove(Objlist *list, Obj_Entry *obj) 2640 { 2641 Objlist_Entry *elm; 2642 2643 if ((elm = objlist_find(list, obj)) != NULL) { 2644 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link); 2645 free(elm); 2646 } 2647 } 2648 2649 /* 2650 * Relocate dag rooted in the specified object. 2651 * Returns 0 on success, or -1 on failure. 2652 */ 2653 2654 static int 2655 relocate_object_dag(Obj_Entry *root, bool bind_now, Obj_Entry *rtldobj, 2656 int flags, RtldLockState *lockstate) 2657 { 2658 Objlist_Entry *elm; 2659 int error; 2660 2661 error = 0; 2662 STAILQ_FOREACH(elm, &root->dagmembers, link) { 2663 error = relocate_object(elm->obj, bind_now, rtldobj, flags, 2664 lockstate); 2665 if (error == -1) 2666 break; 2667 } 2668 return (error); 2669 } 2670 2671 /* 2672 * Prepare for, or clean after, relocating an object marked with 2673 * DT_TEXTREL or DF_TEXTREL. Before relocating, all read-only 2674 * segments are remapped read-write. After relocations are done, the 2675 * segment's permissions are returned back to the modes specified in 2676 * the phdrs. If any relocation happened, or always for wired 2677 * program, COW is triggered. 
2678 */ 2679 static int 2680 reloc_textrel_prot(Obj_Entry *obj, bool before) 2681 { 2682 const Elf_Phdr *ph; 2683 void *base; 2684 size_t l, sz; 2685 int prot; 2686 2687 for (l = obj->phsize / sizeof(*ph), ph = obj->phdr; l > 0; 2688 l--, ph++) { 2689 if (ph->p_type != PT_LOAD || (ph->p_flags & PF_W) != 0) 2690 continue; 2691 base = obj->relocbase + trunc_page(ph->p_vaddr); 2692 sz = round_page(ph->p_vaddr + ph->p_filesz) - 2693 trunc_page(ph->p_vaddr); 2694 prot = convert_prot(ph->p_flags) | (before ? PROT_WRITE : 0); 2695 if (mprotect(base, sz, prot) == -1) { 2696 _rtld_error("%s: Cannot write-%sable text segment: %s", 2697 obj->path, before ? "en" : "dis", 2698 rtld_strerror(errno)); 2699 return (-1); 2700 } 2701 } 2702 return (0); 2703 } 2704 2705 /* 2706 * Relocate single object. 2707 * Returns 0 on success, or -1 on failure. 2708 */ 2709 static int 2710 relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj, 2711 int flags, RtldLockState *lockstate) 2712 { 2713 2714 if (obj->relocated) 2715 return (0); 2716 obj->relocated = true; 2717 if (obj != rtldobj) 2718 dbg("relocating \"%s\"", obj->path); 2719 2720 if (obj->symtab == NULL || obj->strtab == NULL || 2721 !(obj->valid_hash_sysv || obj->valid_hash_gnu)) { 2722 _rtld_error("%s: Shared object has no run-time symbol table", 2723 obj->path); 2724 return (-1); 2725 } 2726 2727 /* There are relocations to the write-protected text segment. */ 2728 if (obj->textrel && reloc_textrel_prot(obj, true) != 0) 2729 return (-1); 2730 2731 /* Process the non-PLT non-IFUNC relocations. */ 2732 if (reloc_non_plt(obj, rtldobj, flags, lockstate)) 2733 return (-1); 2734 2735 /* Re-protected the text segment. */ 2736 if (obj->textrel && reloc_textrel_prot(obj, false) != 0) 2737 return (-1); 2738 2739 /* Set the special PLT or GOT entries. */ 2740 init_pltgot(obj); 2741 2742 /* Process the PLT relocations. */ 2743 if (reloc_plt(obj) == -1) 2744 return (-1); 2745 /* Relocate the jump slots if we are doing immediate binding. */ 2746 if (obj->bind_now || bind_now) 2747 if (reloc_jmpslots(obj, flags, lockstate) == -1) 2748 return (-1); 2749 2750 /* 2751 * Process the non-PLT IFUNC relocations. The relocations are 2752 * processed in two phases, because IFUNC resolvers may 2753 * reference other symbols, which must be readily processed 2754 * before resolvers are called. 2755 */ 2756 if (obj->non_plt_gnu_ifunc && 2757 reloc_non_plt(obj, rtldobj, flags | SYMLOOK_IFUNC, lockstate)) 2758 return (-1); 2759 2760 if (!obj->mainprog && obj_enforce_relro(obj) == -1) 2761 return (-1); 2762 2763 /* 2764 * Set up the magic number and version in the Obj_Entry. These 2765 * were checked in the crt1.o from the original ElfKit, so we 2766 * set them for backward compatibility. 2767 */ 2768 obj->magic = RTLD_MAGIC; 2769 obj->version = RTLD_VERSION; 2770 2771 return (0); 2772 } 2773 2774 /* 2775 * Relocate newly-loaded shared objects. The argument is a pointer to 2776 * the Obj_Entry for the first such object. All objects from the first 2777 * to the end of the list of objects are relocated. Returns 0 on success, 2778 * or -1 on failure. 
2779 */ 2780 static int 2781 relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj, 2782 int flags, RtldLockState *lockstate) 2783 { 2784 Obj_Entry *obj; 2785 int error; 2786 2787 for (error = 0, obj = first; obj != NULL; 2788 obj = TAILQ_NEXT(obj, next)) { 2789 if (obj->marker) 2790 continue; 2791 error = relocate_object(obj, bind_now, rtldobj, flags, 2792 lockstate); 2793 if (error == -1) 2794 break; 2795 } 2796 return (error); 2797 } 2798 2799 /* 2800 * The handling of R_MACHINE_IRELATIVE relocations and jumpslots 2801 * referencing STT_GNU_IFUNC symbols is postponed till the other 2802 * relocations are done. The indirect functions specified as 2803 * ifunc are allowed to call other symbols, so we need to have 2804 * objects relocated before asking for resolution from indirects. 2805 * 2806 * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion, 2807 * instead of the usual lazy handling of PLT slots. It is 2808 * consistent with how GNU does it. 2809 */ 2810 static int 2811 resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags, 2812 RtldLockState *lockstate) 2813 { 2814 if (obj->irelative && reloc_iresolve(obj, lockstate) == -1) 2815 return (-1); 2816 if ((obj->bind_now || bind_now) && obj->gnu_ifunc && 2817 reloc_gnu_ifunc(obj, flags, lockstate) == -1) 2818 return (-1); 2819 return (0); 2820 } 2821 2822 static int 2823 resolve_objects_ifunc(Obj_Entry *first, bool bind_now, int flags, 2824 RtldLockState *lockstate) 2825 { 2826 Obj_Entry *obj; 2827 2828 for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) { 2829 if (obj->marker) 2830 continue; 2831 if (resolve_object_ifunc(obj, bind_now, flags, lockstate) == -1) 2832 return (-1); 2833 } 2834 return (0); 2835 } 2836 2837 static int 2838 initlist_objects_ifunc(Objlist *list, bool bind_now, int flags, 2839 RtldLockState *lockstate) 2840 { 2841 Objlist_Entry *elm; 2842 2843 STAILQ_FOREACH(elm, list, link) { 2844 if (resolve_object_ifunc(elm->obj, bind_now, flags, 2845 lockstate) == -1) 2846 return (-1); 2847 } 2848 return (0); 2849 } 2850 2851 /* 2852 * Cleanup procedure. It will be called (by the atexit mechanism) just 2853 * before the process exits. 2854 */ 2855 static void 2856 rtld_exit(void) 2857 { 2858 RtldLockState lockstate; 2859 2860 wlock_acquire(rtld_bind_lock, &lockstate); 2861 dbg("rtld_exit()"); 2862 objlist_call_fini(&list_fini, NULL, &lockstate); 2863 /* No need to remove the items from the list, since we are exiting. */ 2864 if (!libmap_disable) 2865 lm_fini(); 2866 lock_release(rtld_bind_lock, &lockstate); 2867 } 2868 2869 /* 2870 * Iterate over a search path, translate each element, and invoke the 2871 * callback on the result. 
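 * Elements are separated by ':' or ';'.  Enumeration stops as soon as a
 * callback invocation returns a non-NULL value, which is then returned
 * to the caller; try_library_path() below is a typical callback.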
2872 */ 2873 static void * 2874 path_enumerate(const char *path, path_enum_proc callback, void *arg) 2875 { 2876 const char *trans; 2877 if (path == NULL) 2878 return (NULL); 2879 2880 path += strspn(path, ":;"); 2881 while (*path != '\0') { 2882 size_t len; 2883 char *res; 2884 2885 len = strcspn(path, ":;"); 2886 trans = lm_findn(NULL, path, len); 2887 if (trans) 2888 res = callback(trans, strlen(trans), arg); 2889 else 2890 res = callback(path, len, arg); 2891 2892 if (res != NULL) 2893 return (res); 2894 2895 path += len; 2896 path += strspn(path, ":;"); 2897 } 2898 2899 return (NULL); 2900 } 2901 2902 struct try_library_args { 2903 const char *name; 2904 size_t namelen; 2905 char *buffer; 2906 size_t buflen; 2907 }; 2908 2909 static void * 2910 try_library_path(const char *dir, size_t dirlen, void *param) 2911 { 2912 struct try_library_args *arg; 2913 2914 arg = param; 2915 if (*dir == '/' || trust) { 2916 char *pathname; 2917 2918 if (dirlen + 1 + arg->namelen + 1 > arg->buflen) 2919 return (NULL); 2920 2921 pathname = arg->buffer; 2922 strncpy(pathname, dir, dirlen); 2923 pathname[dirlen] = '/'; 2924 strcpy(pathname + dirlen + 1, arg->name); 2925 2926 dbg(" Trying \"%s\"", pathname); 2927 if (access(pathname, F_OK) == 0) { /* We found it */ 2928 pathname = xmalloc(dirlen + 1 + arg->namelen + 1); 2929 strcpy(pathname, arg->buffer); 2930 return (pathname); 2931 } 2932 } 2933 return (NULL); 2934 } 2935 2936 static char * 2937 search_library_path(const char *name, const char *path) 2938 { 2939 char *p; 2940 struct try_library_args arg; 2941 2942 if (path == NULL) 2943 return NULL; 2944 2945 arg.name = name; 2946 arg.namelen = strlen(name); 2947 arg.buffer = xmalloc(PATH_MAX); 2948 arg.buflen = PATH_MAX; 2949 2950 p = path_enumerate(path, try_library_path, &arg); 2951 2952 free(arg.buffer); 2953 2954 return (p); 2955 } 2956 2957 2958 /* 2959 * Finds the library with the given name using the directory descriptors 2960 * listed in the LD_LIBRARY_PATH_FDS environment variable. 2961 * 2962 * Returns a freshly-opened close-on-exec file descriptor for the library, 2963 * or -1 if the library cannot be found. 2964 */ 2965 static char * 2966 search_library_pathfds(const char *name, const char *path, int *fdp) 2967 { 2968 char *envcopy, *fdstr, *found, *last_token; 2969 size_t len; 2970 int dirfd, fd; 2971 2972 dbg("%s('%s', '%s', fdp)", __func__, name, path); 2973 2974 /* Don't load from user-specified libdirs into setuid binaries. */ 2975 if (!trust) 2976 return (NULL); 2977 2978 /* We can't do anything if LD_LIBRARY_PATH_FDS isn't set. */ 2979 if (path == NULL) 2980 return (NULL); 2981 2982 /* LD_LIBRARY_PATH_FDS only works with relative paths. */ 2983 if (name[0] == '/') { 2984 dbg("Absolute path (%s) passed to %s", name, __func__); 2985 return (NULL); 2986 } 2987 2988 /* 2989 * Use strtok_r() to walk the FD:FD:FD list. This requires a local 2990 * copy of the path, as strtok_r rewrites separator tokens 2991 * with '\0'. 
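 *
 * For example (illustrative values only), LD_LIBRARY_PATH_FDS="10:11"
 * makes the loop below try openat(10, name) and then openat(11, name);
 * a successful open is reported back through *fdp together with a
 * "#<fd>/<name>" pseudo-path for the library.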
2992 */ 2993 found = NULL; 2994 envcopy = xstrdup(path); 2995 for (fdstr = strtok_r(envcopy, ":", &last_token); fdstr != NULL; 2996 fdstr = strtok_r(NULL, ":", &last_token)) { 2997 dirfd = parse_libdir(fdstr); 2998 if (dirfd < 0) 2999 break; 3000 fd = __sys_openat(dirfd, name, O_RDONLY | O_CLOEXEC | O_VERIFY); 3001 if (fd >= 0) { 3002 *fdp = fd; 3003 len = strlen(fdstr) + strlen(name) + 3; 3004 found = xmalloc(len); 3005 if (rtld_snprintf(found, len, "#%d/%s", dirfd, name) < 0) { 3006 _rtld_error("error generating '%d/%s'", 3007 dirfd, name); 3008 rtld_die(); 3009 } 3010 dbg("open('%s') => %d", found, fd); 3011 break; 3012 } 3013 } 3014 free(envcopy); 3015 3016 return (found); 3017 } 3018 3019 3020 int 3021 dlclose(void *handle) 3022 { 3023 RtldLockState lockstate; 3024 int error; 3025 3026 wlock_acquire(rtld_bind_lock, &lockstate); 3027 error = dlclose_locked(handle, &lockstate); 3028 lock_release(rtld_bind_lock, &lockstate); 3029 return (error); 3030 } 3031 3032 static int 3033 dlclose_locked(void *handle, RtldLockState *lockstate) 3034 { 3035 Obj_Entry *root; 3036 3037 root = dlcheck(handle); 3038 if (root == NULL) 3039 return -1; 3040 LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount, 3041 root->path); 3042 3043 /* Unreference the object and its dependencies. */ 3044 root->dl_refcount--; 3045 3046 if (root->refcount == 1) { 3047 /* 3048 * The object will be no longer referenced, so we must unload it. 3049 * First, call the fini functions. 3050 */ 3051 objlist_call_fini(&list_fini, root, lockstate); 3052 3053 unref_dag(root); 3054 3055 /* Finish cleaning up the newly-unreferenced objects. */ 3056 GDB_STATE(RT_DELETE,&root->linkmap); 3057 unload_object(root, lockstate); 3058 GDB_STATE(RT_CONSISTENT,NULL); 3059 } else 3060 unref_dag(root); 3061 3062 LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL); 3063 return 0; 3064 } 3065 3066 char * 3067 dlerror(void) 3068 { 3069 char *msg = error_message; 3070 error_message = NULL; 3071 return msg; 3072 } 3073 3074 /* 3075 * This function is deprecated and has no effect. 3076 */ 3077 void 3078 dllockinit(void *context, 3079 void *(*lock_create)(void *context), 3080 void (*rlock_acquire)(void *lock), 3081 void (*wlock_acquire)(void *lock), 3082 void (*lock_release)(void *lock), 3083 void (*lock_destroy)(void *lock), 3084 void (*context_destroy)(void *context)) 3085 { 3086 static void *cur_context; 3087 static void (*cur_context_destroy)(void *); 3088 3089 /* Just destroy the context from the previous call, if necessary. */ 3090 if (cur_context_destroy != NULL) 3091 cur_context_destroy(cur_context); 3092 cur_context = context; 3093 cur_context_destroy = context_destroy; 3094 } 3095 3096 void * 3097 dlopen(const char *name, int mode) 3098 { 3099 3100 return (rtld_dlopen(name, -1, mode)); 3101 } 3102 3103 void * 3104 fdlopen(int fd, int mode) 3105 { 3106 3107 return (rtld_dlopen(NULL, fd, mode)); 3108 } 3109 3110 static void * 3111 rtld_dlopen(const char *name, int fd, int mode) 3112 { 3113 RtldLockState lockstate; 3114 int lo_flags; 3115 3116 LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name); 3117 ld_tracing = (mode & RTLD_TRACE) == 0 ? 
NULL : "1"; 3118 if (ld_tracing != NULL) { 3119 rlock_acquire(rtld_bind_lock, &lockstate); 3120 if (sigsetjmp(lockstate.env, 0) != 0) 3121 lock_upgrade(rtld_bind_lock, &lockstate); 3122 environ = (char **)*get_program_var_addr("environ", &lockstate); 3123 lock_release(rtld_bind_lock, &lockstate); 3124 } 3125 lo_flags = RTLD_LO_DLOPEN; 3126 if (mode & RTLD_NODELETE) 3127 lo_flags |= RTLD_LO_NODELETE; 3128 if (mode & RTLD_NOLOAD) 3129 lo_flags |= RTLD_LO_NOLOAD; 3130 if (ld_tracing != NULL) 3131 lo_flags |= RTLD_LO_TRACE; 3132 3133 return (dlopen_object(name, fd, obj_main, lo_flags, 3134 mode & (RTLD_MODEMASK | RTLD_GLOBAL), NULL)); 3135 } 3136 3137 static void 3138 dlopen_cleanup(Obj_Entry *obj, RtldLockState *lockstate) 3139 { 3140 3141 obj->dl_refcount--; 3142 unref_dag(obj); 3143 if (obj->refcount == 0) 3144 unload_object(obj, lockstate); 3145 } 3146 3147 static Obj_Entry * 3148 dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags, 3149 int mode, RtldLockState *lockstate) 3150 { 3151 Obj_Entry *old_obj_tail; 3152 Obj_Entry *obj; 3153 Objlist initlist; 3154 RtldLockState mlockstate; 3155 int result; 3156 3157 objlist_init(&initlist); 3158 3159 if (lockstate == NULL && !(lo_flags & RTLD_LO_EARLY)) { 3160 wlock_acquire(rtld_bind_lock, &mlockstate); 3161 lockstate = &mlockstate; 3162 } 3163 GDB_STATE(RT_ADD,NULL); 3164 3165 old_obj_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q)); 3166 obj = NULL; 3167 if (name == NULL && fd == -1) { 3168 obj = obj_main; 3169 obj->refcount++; 3170 } else { 3171 obj = load_object(name, fd, refobj, lo_flags); 3172 } 3173 3174 if (obj) { 3175 obj->dl_refcount++; 3176 if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL) 3177 objlist_push_tail(&list_global, obj); 3178 if (globallist_next(old_obj_tail) != NULL) { 3179 /* We loaded something new. */ 3180 assert(globallist_next(old_obj_tail) == obj); 3181 result = load_needed_objects(obj, 3182 lo_flags & (RTLD_LO_DLOPEN | RTLD_LO_EARLY)); 3183 init_dag(obj); 3184 ref_dag(obj); 3185 if (result != -1) 3186 result = rtld_verify_versions(&obj->dagmembers); 3187 if (result != -1 && ld_tracing) 3188 goto trace; 3189 if (result == -1 || relocate_object_dag(obj, 3190 (mode & RTLD_MODEMASK) == RTLD_NOW, &obj_rtld, 3191 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0, 3192 lockstate) == -1) { 3193 dlopen_cleanup(obj, lockstate); 3194 obj = NULL; 3195 } else if (lo_flags & RTLD_LO_EARLY) { 3196 /* 3197 * Do not call the init functions for early loaded 3198 * filtees. The image is still not initialized enough 3199 * for them to work. 3200 * 3201 * Our object is found by the global object list and 3202 * will be ordered among all init calls done right 3203 * before transferring control to main. 3204 */ 3205 } else { 3206 /* Make list of init functions to call. */ 3207 initlist_add_objects(obj, obj, &initlist); 3208 } 3209 /* 3210 * Process all no_delete or global objects here, given 3211 * them own DAGs to prevent their dependencies from being 3212 * unloaded. This has to be done after we have loaded all 3213 * of the dependencies, so that we do not miss any. 3214 */ 3215 if (obj != NULL) 3216 process_z(obj); 3217 } else { 3218 /* 3219 * Bump the reference counts for objects on this DAG. If 3220 * this is the first dlopen() call for the object that was 3221 * already loaded as a dependency, initialize the dag 3222 * starting at it. 
3223 */ 3224 init_dag(obj); 3225 ref_dag(obj); 3226 3227 if ((lo_flags & RTLD_LO_TRACE) != 0) 3228 goto trace; 3229 } 3230 if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 || 3231 obj->z_nodelete) && !obj->ref_nodel) { 3232 dbg("obj %s nodelete", obj->path); 3233 ref_dag(obj); 3234 obj->z_nodelete = obj->ref_nodel = true; 3235 } 3236 } 3237 3238 LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0, 3239 name); 3240 GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL); 3241 3242 if (!(lo_flags & RTLD_LO_EARLY)) { 3243 map_stacks_exec(lockstate); 3244 } 3245 3246 if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW, 3247 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0, 3248 lockstate) == -1) { 3249 objlist_clear(&initlist); 3250 dlopen_cleanup(obj, lockstate); 3251 if (lockstate == &mlockstate) 3252 lock_release(rtld_bind_lock, lockstate); 3253 return (NULL); 3254 } 3255 3256 if (!(lo_flags & RTLD_LO_EARLY)) { 3257 /* Call the init functions. */ 3258 objlist_call_init(&initlist, lockstate); 3259 } 3260 objlist_clear(&initlist); 3261 if (lockstate == &mlockstate) 3262 lock_release(rtld_bind_lock, lockstate); 3263 return obj; 3264 trace: 3265 trace_loaded_objects(obj); 3266 if (lockstate == &mlockstate) 3267 lock_release(rtld_bind_lock, lockstate); 3268 exit(0); 3269 } 3270 3271 static void * 3272 do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve, 3273 int flags) 3274 { 3275 DoneList donelist; 3276 const Obj_Entry *obj, *defobj; 3277 const Elf_Sym *def; 3278 SymLook req; 3279 RtldLockState lockstate; 3280 tls_index ti; 3281 void *sym; 3282 int res; 3283 3284 def = NULL; 3285 defobj = NULL; 3286 symlook_init(&req, name); 3287 req.ventry = ve; 3288 req.flags = flags | SYMLOOK_IN_PLT; 3289 req.lockstate = &lockstate; 3290 3291 LD_UTRACE(UTRACE_DLSYM_START, handle, NULL, 0, 0, name); 3292 rlock_acquire(rtld_bind_lock, &lockstate); 3293 if (sigsetjmp(lockstate.env, 0) != 0) 3294 lock_upgrade(rtld_bind_lock, &lockstate); 3295 if (handle == NULL || handle == RTLD_NEXT || 3296 handle == RTLD_DEFAULT || handle == RTLD_SELF) { 3297 3298 if ((obj = obj_from_addr(retaddr)) == NULL) { 3299 _rtld_error("Cannot determine caller's shared object"); 3300 lock_release(rtld_bind_lock, &lockstate); 3301 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name); 3302 return NULL; 3303 } 3304 if (handle == NULL) { /* Just the caller's shared object. */ 3305 res = symlook_obj(&req, obj); 3306 if (res == 0) { 3307 def = req.sym_out; 3308 defobj = req.defobj_out; 3309 } 3310 } else if (handle == RTLD_NEXT || /* Objects after caller's */ 3311 handle == RTLD_SELF) { /* ... caller included */ 3312 if (handle == RTLD_NEXT) 3313 obj = globallist_next(obj); 3314 for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) { 3315 if (obj->marker) 3316 continue; 3317 res = symlook_obj(&req, obj); 3318 if (res == 0) { 3319 if (def == NULL || 3320 ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) { 3321 def = req.sym_out; 3322 defobj = req.defobj_out; 3323 if (ELF_ST_BIND(def->st_info) != STB_WEAK) 3324 break; 3325 } 3326 } 3327 } 3328 /* 3329 * Search the dynamic linker itself, and possibly resolve the 3330 * symbol from there. This is how the application links to 3331 * dynamic linker services such as dlopen. 
3332 */ 3333 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { 3334 res = symlook_obj(&req, &obj_rtld); 3335 if (res == 0) { 3336 def = req.sym_out; 3337 defobj = req.defobj_out; 3338 } 3339 } 3340 } else { 3341 assert(handle == RTLD_DEFAULT); 3342 res = symlook_default(&req, obj); 3343 if (res == 0) { 3344 defobj = req.defobj_out; 3345 def = req.sym_out; 3346 } 3347 } 3348 } else { 3349 if ((obj = dlcheck(handle)) == NULL) { 3350 lock_release(rtld_bind_lock, &lockstate); 3351 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name); 3352 return NULL; 3353 } 3354 3355 donelist_init(&donelist); 3356 if (obj->mainprog) { 3357 /* Handle obtained by dlopen(NULL, ...) implies global scope. */ 3358 res = symlook_global(&req, &donelist); 3359 if (res == 0) { 3360 def = req.sym_out; 3361 defobj = req.defobj_out; 3362 } 3363 /* 3364 * Search the dynamic linker itself, and possibly resolve the 3365 * symbol from there. This is how the application links to 3366 * dynamic linker services such as dlopen. 3367 */ 3368 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { 3369 res = symlook_obj(&req, &obj_rtld); 3370 if (res == 0) { 3371 def = req.sym_out; 3372 defobj = req.defobj_out; 3373 } 3374 } 3375 } 3376 else { 3377 /* Search the whole DAG rooted at the given object. */ 3378 res = symlook_list(&req, &obj->dagmembers, &donelist); 3379 if (res == 0) { 3380 def = req.sym_out; 3381 defobj = req.defobj_out; 3382 } 3383 } 3384 } 3385 3386 if (def != NULL) { 3387 lock_release(rtld_bind_lock, &lockstate); 3388 3389 /* 3390 * The value required by the caller is derived from the value 3391 * of the symbol. this is simply the relocated value of the 3392 * symbol. 3393 */ 3394 if (ELF_ST_TYPE(def->st_info) == STT_FUNC) 3395 sym = make_function_pointer(def, defobj); 3396 else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) 3397 sym = rtld_resolve_ifunc(defobj, def); 3398 else if (ELF_ST_TYPE(def->st_info) == STT_TLS) { 3399 ti.ti_module = defobj->tlsindex; 3400 ti.ti_offset = def->st_value; 3401 sym = __tls_get_addr(&ti); 3402 } else 3403 sym = defobj->relocbase + def->st_value; 3404 LD_UTRACE(UTRACE_DLSYM_STOP, handle, sym, 0, 0, name); 3405 return (sym); 3406 } 3407 3408 _rtld_error("Undefined symbol \"%s\"", name); 3409 lock_release(rtld_bind_lock, &lockstate); 3410 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name); 3411 return NULL; 3412 } 3413 3414 void * 3415 dlsym(void *handle, const char *name) 3416 { 3417 return do_dlsym(handle, name, __builtin_return_address(0), NULL, 3418 SYMLOOK_DLSYM); 3419 } 3420 3421 dlfunc_t 3422 dlfunc(void *handle, const char *name) 3423 { 3424 union { 3425 void *d; 3426 dlfunc_t f; 3427 } rv; 3428 3429 rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL, 3430 SYMLOOK_DLSYM); 3431 return (rv.f); 3432 } 3433 3434 void * 3435 dlvsym(void *handle, const char *name, const char *version) 3436 { 3437 Ver_Entry ventry; 3438 3439 ventry.name = version; 3440 ventry.file = NULL; 3441 ventry.hash = elf_hash(version); 3442 ventry.flags= 0; 3443 return do_dlsym(handle, name, __builtin_return_address(0), &ventry, 3444 SYMLOOK_DLSYM); 3445 } 3446 3447 int 3448 _rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info) 3449 { 3450 const Obj_Entry *obj; 3451 RtldLockState lockstate; 3452 3453 rlock_acquire(rtld_bind_lock, &lockstate); 3454 obj = obj_from_addr(addr); 3455 if (obj == NULL) { 3456 _rtld_error("No shared object contains address"); 3457 lock_release(rtld_bind_lock, &lockstate); 3458 return (0); 3459 } 3460 rtld_fill_dl_phdr_info(obj, 
phdr_info); 3461 lock_release(rtld_bind_lock, &lockstate); 3462 return (1); 3463 } 3464 3465 int 3466 dladdr(const void *addr, Dl_info *info) 3467 { 3468 const Obj_Entry *obj; 3469 const Elf_Sym *def; 3470 void *symbol_addr; 3471 unsigned long symoffset; 3472 RtldLockState lockstate; 3473 3474 rlock_acquire(rtld_bind_lock, &lockstate); 3475 obj = obj_from_addr(addr); 3476 if (obj == NULL) { 3477 _rtld_error("No shared object contains address"); 3478 lock_release(rtld_bind_lock, &lockstate); 3479 return 0; 3480 } 3481 info->dli_fname = obj->path; 3482 info->dli_fbase = obj->mapbase; 3483 info->dli_saddr = (void *)0; 3484 info->dli_sname = NULL; 3485 3486 /* 3487 * Walk the symbol list looking for the symbol whose address is 3488 * closest to the address sent in. 3489 */ 3490 for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) { 3491 def = obj->symtab + symoffset; 3492 3493 /* 3494 * Skip the symbol if st_shndx is either SHN_UNDEF or 3495 * SHN_COMMON. 3496 */ 3497 if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON) 3498 continue; 3499 3500 /* 3501 * If the symbol's address is greater than the specified address, 3502 * or if it is further away from addr than the current nearest 3503 * symbol, then reject it. 3504 */ 3505 symbol_addr = obj->relocbase + def->st_value; 3506 if (symbol_addr > addr || symbol_addr < info->dli_saddr) 3507 continue; 3508 3509 /* Update our idea of the nearest symbol. */ 3510 info->dli_sname = obj->strtab + def->st_name; 3511 info->dli_saddr = symbol_addr; 3512 3513 /* Exact match? */ 3514 if (info->dli_saddr == addr) 3515 break; 3516 } 3517 lock_release(rtld_bind_lock, &lockstate); 3518 return 1; 3519 } 3520 3521 int 3522 dlinfo(void *handle, int request, void *p) 3523 { 3524 const Obj_Entry *obj; 3525 RtldLockState lockstate; 3526 int error; 3527 3528 rlock_acquire(rtld_bind_lock, &lockstate); 3529 3530 if (handle == NULL || handle == RTLD_SELF) { 3531 void *retaddr; 3532 3533 retaddr = __builtin_return_address(0); /* __GNUC__ only */ 3534 if ((obj = obj_from_addr(retaddr)) == NULL) 3535 _rtld_error("Cannot determine caller's shared object"); 3536 } else 3537 obj = dlcheck(handle); 3538 3539 if (obj == NULL) { 3540 lock_release(rtld_bind_lock, &lockstate); 3541 return (-1); 3542 } 3543 3544 error = 0; 3545 switch (request) { 3546 case RTLD_DI_LINKMAP: 3547 *((struct link_map const **)p) = &obj->linkmap; 3548 break; 3549 case RTLD_DI_ORIGIN: 3550 error = rtld_dirname(obj->path, p); 3551 break; 3552 3553 case RTLD_DI_SERINFOSIZE: 3554 case RTLD_DI_SERINFO: 3555 error = do_search_info(obj, request, (struct dl_serinfo *)p); 3556 break; 3557 3558 default: 3559 _rtld_error("Invalid request %d passed to dlinfo()", request); 3560 error = -1; 3561 } 3562 3563 lock_release(rtld_bind_lock, &lockstate); 3564 3565 return (error); 3566 } 3567 3568 static void 3569 rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info) 3570 { 3571 3572 phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase; 3573 phdr_info->dlpi_name = obj->path; 3574 phdr_info->dlpi_phdr = obj->phdr; 3575 phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]); 3576 phdr_info->dlpi_tls_modid = obj->tlsindex; 3577 phdr_info->dlpi_tls_data = obj->tlsinit; 3578 phdr_info->dlpi_adds = obj_loads; 3579 phdr_info->dlpi_subs = obj_loads - obj_count; 3580 } 3581 3582 int 3583 dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param) 3584 { 3585 struct dl_phdr_info phdr_info; 3586 Obj_Entry *obj, marker; 3587 RtldLockState bind_lockstate, phdr_lockstate; 3588 int error; 3589 3590
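/*
 * A marker entry is inserted after the current object before the bind
 * lock is dropped around each callback; iteration then resumes from the
 * marker, so objects loaded or unloaded while the lock was released do
 * not invalidate the traversal.
 */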
init_marker(&marker); 3591 error = 0; 3592 3593 wlock_acquire(rtld_phdr_lock, &phdr_lockstate); 3594 wlock_acquire(rtld_bind_lock, &bind_lockstate); 3595 for (obj = globallist_curr(TAILQ_FIRST(&obj_list)); obj != NULL;) { 3596 TAILQ_INSERT_AFTER(&obj_list, obj, &marker, next); 3597 rtld_fill_dl_phdr_info(obj, &phdr_info); 3598 hold_object(obj); 3599 lock_release(rtld_bind_lock, &bind_lockstate); 3600 3601 error = callback(&phdr_info, sizeof phdr_info, param); 3602 3603 wlock_acquire(rtld_bind_lock, &bind_lockstate); 3604 unhold_object(obj); 3605 obj = globallist_next(&marker); 3606 TAILQ_REMOVE(&obj_list, &marker, next); 3607 if (error != 0) { 3608 lock_release(rtld_bind_lock, &bind_lockstate); 3609 lock_release(rtld_phdr_lock, &phdr_lockstate); 3610 return (error); 3611 } 3612 } 3613 3614 if (error == 0) { 3615 rtld_fill_dl_phdr_info(&obj_rtld, &phdr_info); 3616 lock_release(rtld_bind_lock, &bind_lockstate); 3617 error = callback(&phdr_info, sizeof(phdr_info), param); 3618 } 3619 lock_release(rtld_phdr_lock, &phdr_lockstate); 3620 return (error); 3621 } 3622 3623 static void * 3624 fill_search_info(const char *dir, size_t dirlen, void *param) 3625 { 3626 struct fill_search_info_args *arg; 3627 3628 arg = param; 3629 3630 if (arg->request == RTLD_DI_SERINFOSIZE) { 3631 arg->serinfo->dls_cnt ++; 3632 arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1; 3633 } else { 3634 struct dl_serpath *s_entry; 3635 3636 s_entry = arg->serpath; 3637 s_entry->dls_name = arg->strspace; 3638 s_entry->dls_flags = arg->flags; 3639 3640 strncpy(arg->strspace, dir, dirlen); 3641 arg->strspace[dirlen] = '\0'; 3642 3643 arg->strspace += dirlen + 1; 3644 arg->serpath++; 3645 } 3646 3647 return (NULL); 3648 } 3649 3650 static int 3651 do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info) 3652 { 3653 struct dl_serinfo _info; 3654 struct fill_search_info_args args; 3655 3656 args.request = RTLD_DI_SERINFOSIZE; 3657 args.serinfo = &_info; 3658 3659 _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath); 3660 _info.dls_cnt = 0; 3661 3662 path_enumerate(obj->rpath, fill_search_info, &args); 3663 path_enumerate(ld_library_path, fill_search_info, &args); 3664 path_enumerate(obj->runpath, fill_search_info, &args); 3665 path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args); 3666 if (!obj->z_nodeflib) 3667 path_enumerate(ld_standard_library_path, fill_search_info, &args); 3668 3669 3670 if (request == RTLD_DI_SERINFOSIZE) { 3671 info->dls_size = _info.dls_size; 3672 info->dls_cnt = _info.dls_cnt; 3673 return (0); 3674 } 3675 3676 if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) { 3677 _rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()"); 3678 return (-1); 3679 } 3680 3681 args.request = RTLD_DI_SERINFO; 3682 args.serinfo = info; 3683 args.serpath = &info->dls_serpath[0]; 3684 args.strspace = (char *)&info->dls_serpath[_info.dls_cnt]; 3685 3686 args.flags = LA_SER_RUNPATH; 3687 if (path_enumerate(obj->rpath, fill_search_info, &args) != NULL) 3688 return (-1); 3689 3690 args.flags = LA_SER_LIBPATH; 3691 if (path_enumerate(ld_library_path, fill_search_info, &args) != NULL) 3692 return (-1); 3693 3694 args.flags = LA_SER_RUNPATH; 3695 if (path_enumerate(obj->runpath, fill_search_info, &args) != NULL) 3696 return (-1); 3697 3698 args.flags = LA_SER_CONFIG; 3699 if (path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args) 3700 != NULL) 3701 return (-1); 3702 3703 args.flags = LA_SER_DEFAULT; 3704 if (!obj->z_nodeflib && 3705 
path_enumerate(ld_standard_library_path, fill_search_info, &args) != NULL) 3706 return (-1); 3707 return (0); 3708 } 3709 3710 static int 3711 rtld_dirname(const char *path, char *bname) 3712 { 3713 const char *endp; 3714 3715 /* Empty or NULL string gets treated as "." */ 3716 if (path == NULL || *path == '\0') { 3717 bname[0] = '.'; 3718 bname[1] = '\0'; 3719 return (0); 3720 } 3721 3722 /* Strip trailing slashes */ 3723 endp = path + strlen(path) - 1; 3724 while (endp > path && *endp == '/') 3725 endp--; 3726 3727 /* Find the start of the dir */ 3728 while (endp > path && *endp != '/') 3729 endp--; 3730 3731 /* Either the dir is "/" or there are no slashes */ 3732 if (endp == path) { 3733 bname[0] = *endp == '/' ? '/' : '.'; 3734 bname[1] = '\0'; 3735 return (0); 3736 } else { 3737 do { 3738 endp--; 3739 } while (endp > path && *endp == '/'); 3740 } 3741 3742 if (endp - path + 2 > PATH_MAX) 3743 { 3744 _rtld_error("Filename is too long: %s", path); 3745 return(-1); 3746 } 3747 3748 strncpy(bname, path, endp - path + 1); 3749 bname[endp - path + 1] = '\0'; 3750 return (0); 3751 } 3752 3753 static int 3754 rtld_dirname_abs(const char *path, char *base) 3755 { 3756 char *last; 3757 3758 if (realpath(path, base) == NULL) 3759 return (-1); 3760 dbg("%s -> %s", path, base); 3761 last = strrchr(base, '/'); 3762 if (last == NULL) 3763 return (-1); 3764 if (last != base) 3765 *last = '\0'; 3766 return (0); 3767 } 3768 3769 static void 3770 linkmap_add(Obj_Entry *obj) 3771 { 3772 struct link_map *l = &obj->linkmap; 3773 struct link_map *prev; 3774 3775 obj->linkmap.l_name = obj->path; 3776 obj->linkmap.l_addr = obj->mapbase; 3777 obj->linkmap.l_ld = obj->dynamic; 3778 #ifdef __mips__ 3779 /* GDB needs load offset on MIPS to use the symbols */ 3780 obj->linkmap.l_offs = obj->relocbase; 3781 #endif 3782 3783 if (r_debug.r_map == NULL) { 3784 r_debug.r_map = l; 3785 return; 3786 } 3787 3788 /* 3789 * Scan to the end of the list, but not past the entry for the 3790 * dynamic linker, which we want to keep at the very end. 3791 */ 3792 for (prev = r_debug.r_map; 3793 prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap; 3794 prev = prev->l_next) 3795 ; 3796 3797 /* Link in the new entry. */ 3798 l->l_prev = prev; 3799 l->l_next = prev->l_next; 3800 if (l->l_next != NULL) 3801 l->l_next->l_prev = l; 3802 prev->l_next = l; 3803 } 3804 3805 static void 3806 linkmap_delete(Obj_Entry *obj) 3807 { 3808 struct link_map *l = &obj->linkmap; 3809 3810 if (l->l_prev == NULL) { 3811 if ((r_debug.r_map = l->l_next) != NULL) 3812 l->l_next->l_prev = NULL; 3813 return; 3814 } 3815 3816 if ((l->l_prev->l_next = l->l_next) != NULL) 3817 l->l_next->l_prev = l->l_prev; 3818 } 3819 3820 /* 3821 * Function for the debugger to set a breakpoint on to gain control. 3822 * 3823 * The two parameters allow the debugger to easily find and determine 3824 * what the runtime loader is doing and to whom it is doing it. 3825 * 3826 * When the loadhook trap is hit (r_debug_state, set at program 3827 * initialization), the arguments can be found on the stack: 3828 * 3829 * +8 struct link_map *m 3830 * +4 struct r_debug *rd 3831 * +0 RetAddr 3832 */ 3833 void 3834 r_debug_state(struct r_debug* rd, struct link_map *m) 3835 { 3836 /* 3837 * The following is a hack to force the compiler to emit calls to 3838 * this function, even when optimizing. If the function is empty, 3839 * the compiler is not obliged to emit any code for calls to it, 3840 * even when marked __noinline. However, gdb depends on those 3841 * calls being made. 

/*
 * Function for the debugger to set a breakpoint on to gain control.
 *
 * The two parameters allow the debugger to easily find and determine
 * what the runtime loader is doing and to whom it is doing it.
 *
 * When the loadhook trap is hit (r_debug_state, set at program
 * initialization), the arguments can be found on the stack:
 *
 *  +8   struct link_map *m
 *  +4   struct r_debug  *rd
 *  +0   RetAddr
 */
void
r_debug_state(struct r_debug* rd, struct link_map *m)
{
    /*
     * The following is a hack to force the compiler to emit calls to
     * this function, even when optimizing.  If the function is empty,
     * the compiler is not obliged to emit any code for calls to it,
     * even when marked __noinline.  However, gdb depends on those
     * calls being made.
     */
    __compiler_membar();
}

/*
 * A function called after init routines have completed.  This can be used
 * to break before a program's entry routine is called, even when main is
 * not available in the symbol table.
 */
void
_r_debug_postinit(struct link_map *m)
{

    /* See r_debug_state(). */
    __compiler_membar();
}

static void
release_object(Obj_Entry *obj)
{

    if (obj->holdcount > 0) {
	obj->unholdfree = true;
	return;
    }
    munmap(obj->mapbase, obj->mapsize);
    linkmap_delete(obj);
    obj_free(obj);
}

/*
 * Get address of the pointer variable in the main program.
 * Prefer non-weak symbol over the weak one.
 */
static const void **
get_program_var_addr(const char *name, RtldLockState *lockstate)
{
    SymLook req;
    DoneList donelist;

    symlook_init(&req, name);
    req.lockstate = lockstate;
    donelist_init(&donelist);
    if (symlook_global(&req, &donelist) != 0)
	return (NULL);
    if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC)
	return ((const void **)make_function_pointer(req.sym_out,
	  req.defobj_out));
    else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC)
	return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out));
    else
	return ((const void **)(req.defobj_out->relocbase +
	  req.sym_out->st_value));
}

/*
 * Set a pointer variable in the main program to the given value.  This
 * is used to set key variables such as "environ" before any of the
 * init functions are called.
 */
static void
set_program_var(const char *name, const void *value)
{
    const void **addr;

    if ((addr = get_program_var_addr(name, NULL)) != NULL) {
	dbg("\"%s\": *%p <-- %p", name, addr, value);
	*addr = value;
    }
}

/*
 * Search the global objects, including dependencies and main object,
 * for the given symbol.
 */
static int
symlook_global(SymLook *req, DoneList *donelist)
{
    SymLook req1;
    const Objlist_Entry *elm;
    int res;

    symlook_init_from_req(&req1, req);

    /* Search all objects loaded at program start up. */
    if (req->defobj_out == NULL ||
      ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
	res = symlook_list(&req1, &list_main, donelist);
	if (res == 0 && (req->defobj_out == NULL ||
	  ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
	    req->sym_out = req1.sym_out;
	    req->defobj_out = req1.defobj_out;
	    assert(req->defobj_out != NULL);
	}
    }

    /* Search all DAGs whose roots are RTLD_GLOBAL objects. */
    STAILQ_FOREACH(elm, &list_global, link) {
	if (req->defobj_out != NULL &&
	  ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
	    break;
	res = symlook_list(&req1, &elm->obj->dagmembers, donelist);
	if (res == 0 && (req->defobj_out == NULL ||
	  ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
	    req->sym_out = req1.sym_out;
	    req->defobj_out = req1.defobj_out;
	    assert(req->defobj_out != NULL);
	}
    }

    return (req->sym_out != NULL ? 0 : ESRCH);
}

/*
 * Given a symbol name in a referencing object, find the corresponding
 * definition of the symbol.  Returns a pointer to the symbol, or NULL if
 * no definition was found.
 * Returns a pointer to the Obj_Entry of the defining object via the
 * reference parameter DEFOBJ_OUT.
 */
static int
symlook_default(SymLook *req, const Obj_Entry *refobj)
{
    DoneList donelist;
    const Objlist_Entry *elm;
    SymLook req1;
    int res;

    donelist_init(&donelist);
    symlook_init_from_req(&req1, req);

    /*
     * Look first in the referencing object if linked symbolically,
     * and similarly handle protected symbols.
     */
    res = symlook_obj(&req1, refobj);
    if (res == 0 && (refobj->symbolic ||
      ELF_ST_VISIBILITY(req1.sym_out->st_other) == STV_PROTECTED)) {
	req->sym_out = req1.sym_out;
	req->defobj_out = req1.defobj_out;
	assert(req->defobj_out != NULL);
    }
    if (refobj->symbolic || req->defobj_out != NULL)
	donelist_check(&donelist, refobj);

    symlook_global(req, &donelist);

    /* Search all dlopened DAGs containing the referencing object. */
    STAILQ_FOREACH(elm, &refobj->dldags, link) {
	if (req->sym_out != NULL &&
	  ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
	    break;
	res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
	if (res == 0 && (req->sym_out == NULL ||
	  ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
	    req->sym_out = req1.sym_out;
	    req->defobj_out = req1.defobj_out;
	    assert(req->defobj_out != NULL);
	}
    }

    /*
     * Search the dynamic linker itself, and possibly resolve the
     * symbol from there.  This is how the application links to
     * dynamic linker services such as dlopen.
     */
    if (req->sym_out == NULL ||
      ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
	res = symlook_obj(&req1, &obj_rtld);
	if (res == 0) {
	    req->sym_out = req1.sym_out;
	    req->defobj_out = req1.defobj_out;
	    assert(req->defobj_out != NULL);
	}
    }

    return (req->sym_out != NULL ? 0 : ESRCH);
}

static int
symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
{
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    const Objlist_Entry *elm;
    SymLook req1;
    int res;

    def = NULL;
    defobj = NULL;
    STAILQ_FOREACH(elm, objlist, link) {
	if (donelist_check(dlp, elm->obj))
	    continue;
	symlook_init_from_req(&req1, req);
	if ((res = symlook_obj(&req1, elm->obj)) == 0) {
	    if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
		def = req1.sym_out;
		defobj = req1.defobj_out;
		if (ELF_ST_BIND(def->st_info) != STB_WEAK)
		    break;
	    }
	}
    }
    if (def != NULL) {
	req->sym_out = def;
	req->defobj_out = defobj;
	return (0);
    }
    return (ESRCH);
}

/*
 * Search the chain of DAGs pointed to by the given Needed_Entry
 * for a symbol of the given name.  Each DAG is scanned completely
 * before advancing to the next one.  Returns a pointer to the symbol,
 * or NULL if no definition was found.
 */
static int
symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
{
    const Elf_Sym *def;
    const Needed_Entry *n;
    const Obj_Entry *defobj;
    SymLook req1;
    int res;

    def = NULL;
    defobj = NULL;
    symlook_init_from_req(&req1, req);
    for (n = needed; n != NULL; n = n->next) {
	if (n->obj == NULL ||
	  (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
	    continue;
	if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
	    def = req1.sym_out;
	    defobj = req1.defobj_out;
	    if (ELF_ST_BIND(def->st_info) != STB_WEAK)
		break;
	}
    }
    if (def != NULL) {
	req->sym_out = def;
	req->defobj_out = defobj;
	return (0);
    }
    return (ESRCH);
}

/*
 * Search the symbol table of a single shared object for a symbol of
 * the given name and version, if requested.  Returns a pointer to the
 * symbol, or NULL if no definition was found.  If the object is a
 * filter, return the filtered symbol from the filtee.
 *
 * The symbol's hash value is passed in for efficiency reasons; that
 * eliminates many recomputations of the hash value.
 */
int
symlook_obj(SymLook *req, const Obj_Entry *obj)
{
    DoneList donelist;
    SymLook req1;
    int flags, res, mres;

    /*
     * If there is at least one valid hash at this point, we prefer to
     * use the faster GNU version if available.
     */
    if (obj->valid_hash_gnu)
	mres = symlook_obj1_gnu(req, obj);
    else if (obj->valid_hash_sysv)
	mres = symlook_obj1_sysv(req, obj);
    else
	return (EINVAL);

    if (mres == 0) {
	if (obj->needed_filtees != NULL) {
	    flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
	    load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
	    donelist_init(&donelist);
	    symlook_init_from_req(&req1, req);
	    res = symlook_needed(&req1, obj->needed_filtees, &donelist);
	    if (res == 0) {
		req->sym_out = req1.sym_out;
		req->defobj_out = req1.defobj_out;
	    }
	    return (res);
	}
	if (obj->needed_aux_filtees != NULL) {
	    flags = (req->flags & SYMLOOK_EARLY) ?
	      RTLD_LO_EARLY : 0;
	    load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
	    donelist_init(&donelist);
	    symlook_init_from_req(&req1, req);
	    res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist);
	    if (res == 0) {
		req->sym_out = req1.sym_out;
		req->defobj_out = req1.defobj_out;
		return (res);
	    }
	}
    }
    return (mres);
}

/* Symbol match routine common to both hash functions */
static bool
matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result,
    const unsigned long symnum)
{
    Elf_Versym verndx;
    const Elf_Sym *symp;
    const char *strp;

    symp = obj->symtab + symnum;
    strp = obj->strtab + symp->st_name;

    switch (ELF_ST_TYPE(symp->st_info)) {
    case STT_FUNC:
    case STT_NOTYPE:
    case STT_OBJECT:
    case STT_COMMON:
    case STT_GNU_IFUNC:
	if (symp->st_value == 0)
	    return (false);
	/* fallthrough */
    case STT_TLS:
	if (symp->st_shndx != SHN_UNDEF)
	    break;
#ifndef __mips__
	else if (((req->flags & SYMLOOK_IN_PLT) == 0) &&
	  (ELF_ST_TYPE(symp->st_info) == STT_FUNC))
	    break;
	/* fallthrough */
#endif
    default:
	return (false);
    }
    if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0)
	return (false);

    if (req->ventry == NULL) {
	if (obj->versyms != NULL) {
	    verndx = VER_NDX(obj->versyms[symnum]);
	    if (verndx > obj->vernum) {
		_rtld_error(
		  "%s: symbol %s references wrong version %d",
		  obj->path, obj->strtab + symnum, verndx);
		return (false);
	    }
	    /*
	     * If we are not called from dlsym (i.e. this
	     * is a normal relocation from an unversioned
	     * binary), accept the symbol immediately if
	     * it happens to have the first version after this
	     * shared object became versioned.  Otherwise,
	     * if the symbol is versioned and not hidden,
	     * remember it.  If it is the only symbol with
	     * this name exported by the shared object, it
	     * will be returned as a match by the calling
	     * function.  If the symbol is global (verndx < 2),
	     * accept it unconditionally.
	     */
	    if ((req->flags & SYMLOOK_DLSYM) == 0 &&
	      verndx == VER_NDX_GIVEN) {
		result->sym_out = symp;
		return (true);
	    } else if (verndx >= VER_NDX_GIVEN) {
		if ((obj->versyms[symnum] & VER_NDX_HIDDEN) == 0) {
		    if (result->vsymp == NULL)
			result->vsymp = symp;
		    result->vcount++;
		}
		return (false);
	    }
	}
	result->sym_out = symp;
	return (true);
    }
    if (obj->versyms == NULL) {
	if (object_match_name(obj, req->ventry->name)) {
	    _rtld_error("%s: object %s should provide version %s "
	      "for symbol %s", obj_rtld.path, obj->path,
	      req->ventry->name, obj->strtab + symnum);
	    return (false);
	}
    } else {
	verndx = VER_NDX(obj->versyms[symnum]);
	if (verndx > obj->vernum) {
	    _rtld_error("%s: symbol %s references wrong version %d",
	      obj->path, obj->strtab + symnum, verndx);
	    return (false);
	}
	if (obj->vertab[verndx].hash != req->ventry->hash ||
	  strcmp(obj->vertab[verndx].name, req->ventry->name)) {
	    /*
	     * Version does not match.  Look if this is a
	     * global symbol and if it is not hidden.  If a
	     * global symbol (verndx < 2) is available,
	     * use it.  Do not return the symbol if we are
	     * called by dlvsym, because dlvsym looks for
	     * a specific version and the default one is not
	     * what dlvsym wants.
	     */
	    if ((req->flags & SYMLOOK_DLSYM) ||
	      (verndx >= VER_NDX_GIVEN) ||
	      (obj->versyms[symnum] & VER_NDX_HIDDEN))
		return (false);
	}
    }
    result->sym_out = symp;
    return (true);
}

/*
 * Search for symbol using SysV hash function.
 * obj->buckets is known not to be NULL at this point; the test for this was
 * performed with the obj->valid_hash_sysv assignment.
 */
static int
symlook_obj1_sysv(SymLook *req, const Obj_Entry *obj)
{
    unsigned long symnum;
    Sym_Match_Result matchres;

    matchres.sym_out = NULL;
    matchres.vsymp = NULL;
    matchres.vcount = 0;

    for (symnum = obj->buckets[req->hash % obj->nbuckets];
      symnum != STN_UNDEF; symnum = obj->chains[symnum]) {
	if (symnum >= obj->nchains)
	    return (ESRCH);	/* Bad object */

	if (matched_symbol(req, obj, &matchres, symnum)) {
	    req->sym_out = matchres.sym_out;
	    req->defobj_out = obj;
	    return (0);
	}
    }
    if (matchres.vcount == 1) {
	req->sym_out = matchres.vsymp;
	req->defobj_out = obj;
	return (0);
    }
    return (ESRCH);
}

/* Search for symbol using GNU hash function */
static int
symlook_obj1_gnu(SymLook *req, const Obj_Entry *obj)
{
    Elf_Addr bloom_word;
    const Elf32_Word *hashval;
    Elf32_Word bucket;
    Sym_Match_Result matchres;
    unsigned int h1, h2;
    unsigned long symnum;

    matchres.sym_out = NULL;
    matchres.vsymp = NULL;
    matchres.vcount = 0;

    /* Pick right bitmask word from Bloom filter array */
    bloom_word = obj->bloom_gnu[(req->hash_gnu / __ELF_WORD_SIZE) &
      obj->maskwords_bm_gnu];

    /* Calculate modulus word size of gnu hash and its derivative */
    h1 = req->hash_gnu & (__ELF_WORD_SIZE - 1);
    h2 = ((req->hash_gnu >> obj->shift2_gnu) & (__ELF_WORD_SIZE - 1));

    /* Filter out the "definitely not in set" queries */
    if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0)
	return (ESRCH);

    /* Locate hash chain and corresponding value element */
    bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu];
    if (bucket == 0)
	return (ESRCH);
    hashval = &obj->chain_zero_gnu[bucket];
    do {
	/* Compare hashes with the low (end-of-chain) bit masked off. */
	if (((*hashval ^ req->hash_gnu) >> 1) == 0) {
	    symnum = hashval - obj->chain_zero_gnu;
	    if (matched_symbol(req, obj, &matchres, symnum)) {
		req->sym_out = matchres.sym_out;
		req->defobj_out = obj;
		return (0);
	    }
	}
	/* A set low bit marks the last entry in the hash chain. */
    } while ((*hashval++ & 1) == 0);
    if (matchres.vcount == 1) {
	req->sym_out = matchres.vsymp;
	req->defobj_out = obj;
	return (0);
    }
    return (ESRCH);
}

static void
trace_loaded_objects(Obj_Entry *obj)
{
    char *fmt1, *fmt2, *fmt, *main_local, *list_containers;
    int c;

    if ((main_local = getenv(_LD("TRACE_LOADED_OBJECTS_PROGNAME"))) == NULL)
	main_local = "";

    if ((fmt1 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT1"))) == NULL)
	fmt1 = "\t%o => %p (%x)\n";

    if ((fmt2 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT2"))) == NULL)
	fmt2 = "\t%o (%x)\n";

    list_containers = getenv(_LD("TRACE_LOADED_OBJECTS_ALL"));

    for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
	Needed_Entry *needed;
	char *name, *path;
	bool is_lib;

	if (obj->marker)
	    continue;
	if (list_containers && obj->needed != NULL)
	    rtld_printf("%s:\n", obj->path);
	for (needed = obj->needed; needed;
	  needed = needed->next) {
	    if (needed->obj != NULL) {
		if (needed->obj->traced && !list_containers)
		    continue;
		needed->obj->traced = true;
		path = needed->obj->path;
	    } else
		path = "not found";

	    name = (char *)obj->strtab + needed->name;
	    is_lib = strncmp(name, "lib", 3) == 0;	/* XXX - bogus */

	    fmt = is_lib ? fmt1 : fmt2;
	    while ((c = *fmt++) != '\0') {
		switch (c) {
		default:
		    rtld_putchar(c);
		    continue;
		case '\\':
		    switch (c = *fmt) {
		    case '\0':
			continue;
		    case 'n':
			rtld_putchar('\n');
			break;
		    case 't':
			rtld_putchar('\t');
			break;
		    }
		    break;
		case '%':
		    switch (c = *fmt) {
		    case '\0':
			continue;
		    case '%':
		    default:
			rtld_putchar(c);
			break;
		    case 'A':
			rtld_putstr(main_local);
			break;
		    case 'a':
			rtld_putstr(obj_main->path);
			break;
		    case 'o':
			rtld_putstr(name);
			break;
#if 0
		    case 'm':
			rtld_printf("%d", sodp->sod_major);
			break;
		    case 'n':
			rtld_printf("%d", sodp->sod_minor);
			break;
#endif
		    case 'p':
			rtld_putstr(path);
			break;
		    case 'x':
			rtld_printf("%p", needed->obj ? needed->obj->mapbase :
			  0);
			break;
		    }
		    break;
		}
		++fmt;
	    }
	}
    }
}

/*
 * Unload a dlopened object and its dependencies from memory and from
 * our data structures.  It is assumed that the DAG rooted in the
 * object has already been unreferenced, and that the object has a
 * reference count of 0.
 */
static void
unload_object(Obj_Entry *root, RtldLockState *lockstate)
{
    Obj_Entry marker, *obj, *next;

    assert(root->refcount == 0);

    /*
     * Pass over the DAG removing unreferenced objects from
     * appropriate lists.
     */
    unlink_object(root);

    /* Unmap all objects that are no longer referenced. */
    for (obj = TAILQ_FIRST(&obj_list); obj != NULL; obj = next) {
	next = TAILQ_NEXT(obj, next);
	if (obj->marker || obj->refcount != 0)
	    continue;
	LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase,
	  obj->mapsize, 0, obj->path);
	dbg("unloading \"%s\"", obj->path);
	/*
	 * Unlink the object now to prevent new references from
	 * being acquired while the bind lock is dropped in
	 * recursive dlclose() invocations.
	 */
	TAILQ_REMOVE(&obj_list, obj, next);
	obj_count--;

	if (obj->filtees_loaded) {
	    if (next != NULL) {
		init_marker(&marker);
		TAILQ_INSERT_BEFORE(next, &marker, next);
		unload_filtees(obj, lockstate);
		next = TAILQ_NEXT(&marker, next);
		TAILQ_REMOVE(&obj_list, &marker, next);
	    } else
		unload_filtees(obj, lockstate);
	}
	release_object(obj);
    }
}
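
/*
 * Remove an object that is no longer referenced from the RTLD_GLOBAL
 * search list and from the dldags lists of every member of its DAG,
 * recursing so that dependencies whose reference counts have also
 * dropped to zero are unlinked as well.
 */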
static void
unlink_object(Obj_Entry *root)
{
    Objlist_Entry *elm;

    if (root->refcount == 0) {
	/* Remove the object from the RTLD_GLOBAL list. */
	objlist_remove(&list_global, root);

	/* Remove the object from all objects' DAG lists. */
	STAILQ_FOREACH(elm, &root->dagmembers, link) {
	    objlist_remove(&elm->obj->dldags, root);
	    if (elm->obj != root)
		unlink_object(elm->obj);
	}
    }
}

static void
ref_dag(Obj_Entry *root)
{
    Objlist_Entry *elm;

    assert(root->dag_inited);
    STAILQ_FOREACH(elm, &root->dagmembers, link)
	elm->obj->refcount++;
}

static void
unref_dag(Obj_Entry *root)
{
    Objlist_Entry *elm;

    assert(root->dag_inited);
    STAILQ_FOREACH(elm, &root->dagmembers, link)
	elm->obj->refcount--;
}

/*
 * Common code for MD __tls_get_addr().
 */
static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline;
static void *
tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset)
{
    Elf_Addr *newdtv, *dtv;
    RtldLockState lockstate;
    int to_copy;

    dtv = *dtvp;
    /* Check dtv generation in case new modules have arrived */
    if (dtv[0] != tls_dtv_generation) {
	wlock_acquire(rtld_bind_lock, &lockstate);
	newdtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
	to_copy = dtv[1];
	if (to_copy > tls_max_index)
	    to_copy = tls_max_index;
	memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr));
	newdtv[0] = tls_dtv_generation;
	newdtv[1] = tls_max_index;
	free(dtv);
	lock_release(rtld_bind_lock, &lockstate);
	dtv = *dtvp = newdtv;
    }

    /* Dynamically allocate module TLS if necessary */
    if (dtv[index + 1] == 0) {
	/* Signal safe, wlock will block out signals. */
	wlock_acquire(rtld_bind_lock, &lockstate);
	if (!dtv[index + 1])
	    dtv[index + 1] = (Elf_Addr)allocate_module_tls(index);
	lock_release(rtld_bind_lock, &lockstate);
    }
    return ((void *)(dtv[index + 1] + offset));
}

void *
tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
{
    Elf_Addr *dtv;

    dtv = *dtvp;
    /* Check dtv generation in case new modules have arrived */
    if (__predict_true(dtv[0] == tls_dtv_generation &&
      dtv[index + 1] != 0))
	return ((void *)(dtv[index + 1] + offset));
    return (tls_get_addr_slow(dtvp, index, offset));
}
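
/*
 * A note on the DTV layout assumed above and set up by allocate_tls()
 * below: dtv[0] holds the generation count, dtv[1] the number of module
 * slots, and dtv[index + 1] the base address of the TLS block for the
 * module with the given 1-based index.  The two allocate_tls()/free_tls()
 * implementations that follow differ only in the static TLS layout:
 * Variant I places the TLS blocks at positive offsets above the thread
 * control block, while Variant II places them at negative offsets below
 * it.
 */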

#if defined(__aarch64__) || defined(__arm__) || defined(__mips__) || \
    defined(__powerpc__) || defined(__riscv__)

/*
 * Allocate Static TLS using the Variant I method.
 */
void *
allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
{
    Obj_Entry *obj;
    char *tcb;
    Elf_Addr **tls;
    Elf_Addr *dtv;
    Elf_Addr addr;
    int i;

    if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
	return (oldtcb);

    assert(tcbsize >= TLS_TCB_SIZE);
    tcb = xcalloc(1, tls_static_space - TLS_TCB_SIZE + tcbsize);
    tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE);

    if (oldtcb != NULL) {
	memcpy(tls, oldtcb, tls_static_space);
	free(oldtcb);

	/* Adjust the DTV. */
	dtv = tls[0];
	for (i = 0; i < dtv[1]; i++) {
	    if (dtv[i+2] >= (Elf_Addr)oldtcb &&
	      dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) {
		dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tls;
	    }
	}
    } else {
	dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
	tls[0] = dtv;
	dtv[0] = tls_dtv_generation;
	dtv[1] = tls_max_index;

	for (obj = globallist_curr(objs); obj != NULL;
	  obj = globallist_next(obj)) {
	    if (obj->tlsoffset > 0) {
		addr = (Elf_Addr)tls + obj->tlsoffset;
		if (obj->tlsinitsize > 0)
		    memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
		if (obj->tlssize > obj->tlsinitsize)
		    memset((void*) (addr + obj->tlsinitsize), 0,
		      obj->tlssize - obj->tlsinitsize);
		dtv[obj->tlsindex + 1] = addr;
	    }
	}
    }

    return (tcb);
}

void
free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
{
    Elf_Addr *dtv;
    Elf_Addr tlsstart, tlsend;
    int dtvsize, i;

    assert(tcbsize >= TLS_TCB_SIZE);

    tlsstart = (Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE;
    tlsend = tlsstart + tls_static_space;

    dtv = *(Elf_Addr **)tlsstart;
    dtvsize = dtv[1];
    for (i = 0; i < dtvsize; i++) {
	if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) {
	    free((void*)dtv[i+2]);
	}
    }
    free(dtv);
    free(tcb);
}

#endif

#if defined(__i386__) || defined(__amd64__) || defined(__sparc64__)

/*
 * Allocate Static TLS using the Variant II method.
 */
void *
allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
{
    Obj_Entry *obj;
    size_t size, ralign;
    char *tls;
    Elf_Addr *dtv, *olddtv;
    Elf_Addr segbase, oldsegbase, addr;
    int i;

    ralign = tcbalign;
    if (tls_static_max_align > ralign)
	ralign = tls_static_max_align;
    size = round(tls_static_space, ralign) + round(tcbsize, ralign);

    assert(tcbsize >= 2*sizeof(Elf_Addr));
    tls = malloc_aligned(size, ralign);
    dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));

    segbase = (Elf_Addr)(tls + round(tls_static_space, ralign));
    ((Elf_Addr*)segbase)[0] = segbase;
    ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;

    dtv[0] = tls_dtv_generation;
    dtv[1] = tls_max_index;

    if (oldtls) {
	/*
	 * Copy the static TLS block over as a whole.
	 */
	oldsegbase = (Elf_Addr) oldtls;
	memcpy((void *)(segbase - tls_static_space),
	  (const void *)(oldsegbase - tls_static_space),
	  tls_static_space);

	/*
	 * If any dynamic TLS blocks have been created by tls_get_addr(),
	 * move them over.
	 */
	olddtv = ((Elf_Addr**)oldsegbase)[1];
	for (i = 0; i < olddtv[1]; i++) {
	    if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
		dtv[i+2] = olddtv[i+2];
		olddtv[i+2] = 0;
	    }
	}

	/*
	 * We assume that this block was the one we created with
	 * allocate_initial_tls().
4713 */ 4714 free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr)); 4715 } else { 4716 for (obj = objs; obj != NULL; obj = TAILQ_NEXT(obj, next)) { 4717 if (obj->marker || obj->tlsoffset == 0) 4718 continue; 4719 addr = segbase - obj->tlsoffset; 4720 memset((void*) (addr + obj->tlsinitsize), 4721 0, obj->tlssize - obj->tlsinitsize); 4722 if (obj->tlsinit) 4723 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize); 4724 dtv[obj->tlsindex + 1] = addr; 4725 } 4726 } 4727 4728 return (void*) segbase; 4729 } 4730 4731 void 4732 free_tls(void *tls, size_t tcbsize, size_t tcbalign) 4733 { 4734 Elf_Addr* dtv; 4735 size_t size, ralign; 4736 int dtvsize, i; 4737 Elf_Addr tlsstart, tlsend; 4738 4739 /* 4740 * Figure out the size of the initial TLS block so that we can 4741 * find stuff which ___tls_get_addr() allocated dynamically. 4742 */ 4743 ralign = tcbalign; 4744 if (tls_static_max_align > ralign) 4745 ralign = tls_static_max_align; 4746 size = round(tls_static_space, ralign); 4747 4748 dtv = ((Elf_Addr**)tls)[1]; 4749 dtvsize = dtv[1]; 4750 tlsend = (Elf_Addr) tls; 4751 tlsstart = tlsend - size; 4752 for (i = 0; i < dtvsize; i++) { 4753 if (dtv[i + 2] != 0 && (dtv[i + 2] < tlsstart || dtv[i + 2] > tlsend)) { 4754 free_aligned((void *)dtv[i + 2]); 4755 } 4756 } 4757 4758 free_aligned((void *)tlsstart); 4759 free((void*) dtv); 4760 } 4761 4762 #endif 4763 4764 /* 4765 * Allocate TLS block for module with given index. 4766 */ 4767 void * 4768 allocate_module_tls(int index) 4769 { 4770 Obj_Entry* obj; 4771 char* p; 4772 4773 TAILQ_FOREACH(obj, &obj_list, next) { 4774 if (obj->marker) 4775 continue; 4776 if (obj->tlsindex == index) 4777 break; 4778 } 4779 if (!obj) { 4780 _rtld_error("Can't find module with TLS index %d", index); 4781 rtld_die(); 4782 } 4783 4784 p = malloc_aligned(obj->tlssize, obj->tlsalign); 4785 memcpy(p, obj->tlsinit, obj->tlsinitsize); 4786 memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize); 4787 4788 return p; 4789 } 4790 4791 bool 4792 allocate_tls_offset(Obj_Entry *obj) 4793 { 4794 size_t off; 4795 4796 if (obj->tls_done) 4797 return true; 4798 4799 if (obj->tlssize == 0) { 4800 obj->tls_done = true; 4801 return true; 4802 } 4803 4804 if (tls_last_offset == 0) 4805 off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign); 4806 else 4807 off = calculate_tls_offset(tls_last_offset, tls_last_size, 4808 obj->tlssize, obj->tlsalign); 4809 4810 /* 4811 * If we have already fixed the size of the static TLS block, we 4812 * must stay within that size. When allocating the static TLS, we 4813 * leave a small amount of space spare to be used for dynamically 4814 * loading modules which use static TLS. 4815 */ 4816 if (tls_static_space != 0) { 4817 if (calculate_tls_end(off, obj->tlssize) > tls_static_space) 4818 return false; 4819 } else if (obj->tlsalign > tls_static_max_align) { 4820 tls_static_max_align = obj->tlsalign; 4821 } 4822 4823 tls_last_offset = obj->tlsoffset = off; 4824 tls_last_size = obj->tlssize; 4825 obj->tls_done = true; 4826 4827 return true; 4828 } 4829 4830 void 4831 free_tls_offset(Obj_Entry *obj) 4832 { 4833 4834 /* 4835 * If we were the last thing to allocate out of the static TLS 4836 * block, we give our space back to the 'allocator'. This is a 4837 * simplistic workaround to allow libGL.so.1 to be loaded and 4838 * unloaded multiple times. 
4839 */ 4840 if (calculate_tls_end(obj->tlsoffset, obj->tlssize) 4841 == calculate_tls_end(tls_last_offset, tls_last_size)) { 4842 tls_last_offset -= obj->tlssize; 4843 tls_last_size = 0; 4844 } 4845 } 4846 4847 void * 4848 _rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign) 4849 { 4850 void *ret; 4851 RtldLockState lockstate; 4852 4853 wlock_acquire(rtld_bind_lock, &lockstate); 4854 ret = allocate_tls(globallist_curr(TAILQ_FIRST(&obj_list)), oldtls, 4855 tcbsize, tcbalign); 4856 lock_release(rtld_bind_lock, &lockstate); 4857 return (ret); 4858 } 4859 4860 void 4861 _rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign) 4862 { 4863 RtldLockState lockstate; 4864 4865 wlock_acquire(rtld_bind_lock, &lockstate); 4866 free_tls(tcb, tcbsize, tcbalign); 4867 lock_release(rtld_bind_lock, &lockstate); 4868 } 4869 4870 static void 4871 object_add_name(Obj_Entry *obj, const char *name) 4872 { 4873 Name_Entry *entry; 4874 size_t len; 4875 4876 len = strlen(name); 4877 entry = malloc(sizeof(Name_Entry) + len); 4878 4879 if (entry != NULL) { 4880 strcpy(entry->name, name); 4881 STAILQ_INSERT_TAIL(&obj->names, entry, link); 4882 } 4883 } 4884 4885 static int 4886 object_match_name(const Obj_Entry *obj, const char *name) 4887 { 4888 Name_Entry *entry; 4889 4890 STAILQ_FOREACH(entry, &obj->names, link) { 4891 if (strcmp(name, entry->name) == 0) 4892 return (1); 4893 } 4894 return (0); 4895 } 4896 4897 static Obj_Entry * 4898 locate_dependency(const Obj_Entry *obj, const char *name) 4899 { 4900 const Objlist_Entry *entry; 4901 const Needed_Entry *needed; 4902 4903 STAILQ_FOREACH(entry, &list_main, link) { 4904 if (object_match_name(entry->obj, name)) 4905 return entry->obj; 4906 } 4907 4908 for (needed = obj->needed; needed != NULL; needed = needed->next) { 4909 if (strcmp(obj->strtab + needed->name, name) == 0 || 4910 (needed->obj != NULL && object_match_name(needed->obj, name))) { 4911 /* 4912 * If there is DT_NEEDED for the name we are looking for, 4913 * we are all set. Note that object might not be found if 4914 * dependency was not loaded yet, so the function can 4915 * return NULL here. This is expected and handled 4916 * properly by the caller. 
4917 */ 4918 return (needed->obj); 4919 } 4920 } 4921 _rtld_error("%s: Unexpected inconsistency: dependency %s not found", 4922 obj->path, name); 4923 rtld_die(); 4924 } 4925 4926 static int 4927 check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj, 4928 const Elf_Vernaux *vna) 4929 { 4930 const Elf_Verdef *vd; 4931 const char *vername; 4932 4933 vername = refobj->strtab + vna->vna_name; 4934 vd = depobj->verdef; 4935 if (vd == NULL) { 4936 _rtld_error("%s: version %s required by %s not defined", 4937 depobj->path, vername, refobj->path); 4938 return (-1); 4939 } 4940 for (;;) { 4941 if (vd->vd_version != VER_DEF_CURRENT) { 4942 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry", 4943 depobj->path, vd->vd_version); 4944 return (-1); 4945 } 4946 if (vna->vna_hash == vd->vd_hash) { 4947 const Elf_Verdaux *aux = (const Elf_Verdaux *) 4948 ((char *)vd + vd->vd_aux); 4949 if (strcmp(vername, depobj->strtab + aux->vda_name) == 0) 4950 return (0); 4951 } 4952 if (vd->vd_next == 0) 4953 break; 4954 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next); 4955 } 4956 if (vna->vna_flags & VER_FLG_WEAK) 4957 return (0); 4958 _rtld_error("%s: version %s required by %s not found", 4959 depobj->path, vername, refobj->path); 4960 return (-1); 4961 } 4962 4963 static int 4964 rtld_verify_object_versions(Obj_Entry *obj) 4965 { 4966 const Elf_Verneed *vn; 4967 const Elf_Verdef *vd; 4968 const Elf_Verdaux *vda; 4969 const Elf_Vernaux *vna; 4970 const Obj_Entry *depobj; 4971 int maxvernum, vernum; 4972 4973 if (obj->ver_checked) 4974 return (0); 4975 obj->ver_checked = true; 4976 4977 maxvernum = 0; 4978 /* 4979 * Walk over defined and required version records and figure out 4980 * max index used by any of them. Do very basic sanity checking 4981 * while there. 4982 */ 4983 vn = obj->verneed; 4984 while (vn != NULL) { 4985 if (vn->vn_version != VER_NEED_CURRENT) { 4986 _rtld_error("%s: Unsupported version %d of Elf_Verneed entry", 4987 obj->path, vn->vn_version); 4988 return (-1); 4989 } 4990 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux); 4991 for (;;) { 4992 vernum = VER_NEED_IDX(vna->vna_other); 4993 if (vernum > maxvernum) 4994 maxvernum = vernum; 4995 if (vna->vna_next == 0) 4996 break; 4997 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next); 4998 } 4999 if (vn->vn_next == 0) 5000 break; 5001 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next); 5002 } 5003 5004 vd = obj->verdef; 5005 while (vd != NULL) { 5006 if (vd->vd_version != VER_DEF_CURRENT) { 5007 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry", 5008 obj->path, vd->vd_version); 5009 return (-1); 5010 } 5011 vernum = VER_DEF_IDX(vd->vd_ndx); 5012 if (vernum > maxvernum) 5013 maxvernum = vernum; 5014 if (vd->vd_next == 0) 5015 break; 5016 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next); 5017 } 5018 5019 if (maxvernum == 0) 5020 return (0); 5021 5022 /* 5023 * Store version information in array indexable by version index. 5024 * Verify that object version requirements are satisfied along the 5025 * way. 
     */
    obj->vernum = maxvernum + 1;
    obj->vertab = xcalloc(obj->vernum, sizeof(Ver_Entry));

    vd = obj->verdef;
    while (vd != NULL) {
	if ((vd->vd_flags & VER_FLG_BASE) == 0) {
	    vernum = VER_DEF_IDX(vd->vd_ndx);
	    assert(vernum <= maxvernum);
	    vda = (const Elf_Verdaux *)((char *)vd + vd->vd_aux);
	    obj->vertab[vernum].hash = vd->vd_hash;
	    obj->vertab[vernum].name = obj->strtab + vda->vda_name;
	    obj->vertab[vernum].file = NULL;
	    obj->vertab[vernum].flags = 0;
	}
	if (vd->vd_next == 0)
	    break;
	vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
    }

    vn = obj->verneed;
    while (vn != NULL) {
	depobj = locate_dependency(obj, obj->strtab + vn->vn_file);
	if (depobj == NULL)
	    return (-1);
	vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
	for (;;) {
	    if (check_object_provided_version(obj, depobj, vna))
		return (-1);
	    vernum = VER_NEED_IDX(vna->vna_other);
	    assert(vernum <= maxvernum);
	    obj->vertab[vernum].hash = vna->vna_hash;
	    obj->vertab[vernum].name = obj->strtab + vna->vna_name;
	    obj->vertab[vernum].file = obj->strtab + vn->vn_file;
	    obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ?
	      VER_INFO_HIDDEN : 0;
	    if (vna->vna_next == 0)
		break;
	    vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
	}
	if (vn->vn_next == 0)
	    break;
	vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
    }
    return 0;
}

static int
rtld_verify_versions(const Objlist *objlist)
{
    Objlist_Entry *entry;
    int rc;

    rc = 0;
    STAILQ_FOREACH(entry, objlist, link) {
	/*
	 * Skip dummy objects or objects that have their version requirements
	 * already checked.
	 */
	if (entry->obj->strtab == NULL || entry->obj->vertab != NULL)
	    continue;
	if (rtld_verify_object_versions(entry->obj) == -1) {
	    rc = -1;
	    if (ld_tracing == NULL)
		break;
	}
    }
    if (rc == 0 || ld_tracing != NULL)
	rc = rtld_verify_object_versions(&obj_rtld);
    return rc;
}

const Ver_Entry *
fetch_ventry(const Obj_Entry *obj, unsigned long symnum)
{
    Elf_Versym vernum;

    if (obj->vertab) {
	vernum = VER_NDX(obj->versyms[symnum]);
	if (vernum >= obj->vernum) {
	    _rtld_error("%s: symbol %s has wrong verneed value %d",
	      obj->path, obj->strtab + symnum, vernum);
	} else if (obj->vertab[vernum].hash != 0) {
	    return &obj->vertab[vernum];
	}
    }
    return NULL;
}

int
_rtld_get_stack_prot(void)
{

    return (stack_prot);
}

int
_rtld_is_dlopened(void *arg)
{
    Obj_Entry *obj;
    RtldLockState lockstate;
    int res;

    rlock_acquire(rtld_bind_lock, &lockstate);
    obj = dlcheck(arg);
    if (obj == NULL)
	obj = obj_from_addr(arg);
    if (obj == NULL) {
	_rtld_error("No shared object contains address");
	lock_release(rtld_bind_lock, &lockstate);
	return (-1);
    }
    res = obj->dlopened ? 1 : 0;
    lock_release(rtld_bind_lock, &lockstate);
    return (res);
}

int
obj_enforce_relro(Obj_Entry *obj)
{

    if (obj->relro_size > 0 && mprotect(obj->relro_page, obj->relro_size,
      PROT_READ) == -1) {
	_rtld_error("%s: Cannot enforce relro protection: %s",
	  obj->path, rtld_strerror(errno));
	return (-1);
    }
    return (0);
}

static void
map_stacks_exec(RtldLockState *lockstate)
{
    void (*thr_map_stacks_exec)(void);

    if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0)
	return;
    thr_map_stacks_exec = (void (*)(void))(uintptr_t)
      get_program_var_addr("__pthread_map_stacks_exec", lockstate);
    if (thr_map_stacks_exec != NULL) {
	stack_prot |= PROT_EXEC;
	thr_map_stacks_exec();
    }
}

void
symlook_init(SymLook *dst, const char *name)
{

    bzero(dst, sizeof(*dst));
    dst->name = name;
    dst->hash = elf_hash(name);
    dst->hash_gnu = gnu_hash(name);
}

static void
symlook_init_from_req(SymLook *dst, const SymLook *src)
{

    dst->name = src->name;
    dst->hash = src->hash;
    dst->hash_gnu = src->hash_gnu;
    dst->ventry = src->ventry;
    dst->flags = src->flags;
    dst->defobj_out = NULL;
    dst->sym_out = NULL;
    dst->lockstate = src->lockstate;
}

/*
 * Parse a file descriptor number without pulling in more of libc (e.g. atoi).
 */
static int
parse_libdir(const char *str)
{
    static const int RADIX = 10;	/* XXXJA: possibly support hex? */
    const char *orig;
    int fd;
    char c;

    orig = str;
    fd = 0;
    for (c = *str; c != '\0'; c = *++str) {
	if (c < '0' || c > '9')
	    return (-1);

	fd *= RADIX;
	fd += c - '0';
    }

    /* Make sure we actually parsed something. */
    if (str == orig) {
	_rtld_error("failed to parse directory FD from '%s'", str);
	return (-1);
    }
    return (fd);
}

/*
 * Overrides for libc_pic-provided functions.
 */

int
__getosreldate(void)
{
    size_t len;
    int oid[2];
    int error, osrel;

    if (osreldate != 0)
	return (osreldate);

    oid[0] = CTL_KERN;
    oid[1] = KERN_OSRELDATE;
    osrel = 0;
    len = sizeof(osrel);
    error = sysctl(oid, 2, &osrel, &len, NULL, 0);
    if (error == 0 && osrel > 0 && len == sizeof(osrel))
	osreldate = osrel;
    return (osreldate);
}

void
exit(int status)
{

    _exit(status);
}

void (*__cleanup)(void);
int __isthreaded = 0;
int _thread_autoinit_dummy_decl = 1;

/*
 * No unresolved symbols for rtld.
 */
void
__pthread_cxa_finalize(struct dl_phdr_info *a)
{
}

void
__stack_chk_fail(void)
{

    _rtld_error("stack overflow detected; terminated");
    rtld_die();
}
__weak_reference(__stack_chk_fail, __stack_chk_fail_local);

void
__chk_fail(void)
{

    _rtld_error("buffer overflow detected; terminated");
    rtld_die();
}

const char *
rtld_strerror(int errnum)
{

    if (errnum < 0 || errnum >= sys_nerr)
	return ("Unknown error");
    return (sys_errlist[errnum]);
}